From 5bf11060108f684ddbfe0a09972575f71f07474b Mon Sep 17 00:00:00 2001 From: figbot <82115609+withfig-bot@users.noreply.github.com> Date: Tue, 22 Oct 2024 17:34:10 -0700 Subject: [PATCH] feat: update spec --- src/aws.ts | 18 +- src/aws/acm-pca.ts | 8 +- src/aws/amplify.ts | 18 +- src/aws/application-insights.ts | 66 +++- src/aws/athena.ts | 4 +- src/aws/autoscaling.ts | 4 +- src/aws/bedrock-agent.ts | 2 +- src/aws/cloudformation.ts | 130 +++---- src/aws/codebuild.ts | 14 + src/aws/codepipeline.ts | 2 +- src/aws/dataexchange.ts | 333 +++++++++++++++- src/aws/dms.ts | 383 ++++++++++++++++++- src/aws/ecs.ts | 11 +- src/aws/eks.ts | 21 +- src/aws/elastic-inference.ts | 14 +- src/aws/fms.ts | 4 +- src/aws/imagebuilder.ts | 41 +- src/aws/ivs.ts | 280 +++++++------- src/aws/m2.ts | 30 +- src/aws/mailmanager.ts | 10 + src/aws/marketplace-reporting.ts | 1 - src/aws/neptune-graph.ts | 2 +- src/aws/payment-cryptography-data.ts | 88 ++++- src/aws/pinpoint-sms-voice-v2.ts | 10 +- src/aws/qbusiness.ts | 20 +- src/aws/quicksight.ts | 58 +++ src/aws/rds.ts | 26 +- src/aws/redshift.ts | 237 +++++++++++- src/aws/repostspace.ts | 91 +++++ src/aws/resiliencehub.ts | 18 +- src/aws/robomaker.ts | 157 ++++---- src/aws/route53resolver.ts | 4 +- src/aws/s3api.ts | 40 +- src/aws/securitylake.ts | 46 +-- src/aws/sesv2.ts | 8 + src/aws/socialmessaging.ts | 548 +++++++++++++++++++++++++++ src/aws/supplychain.ts | 243 ++++++++++++ src/aws/timestream-query.ts | 21 +- src/aws/transfer.ts | 90 ++++- src/aws/wafv2.ts | 6 +- 40 files changed, 2688 insertions(+), 419 deletions(-) create mode 100644 src/aws/socialmessaging.ts diff --git a/src/aws.ts b/src/aws.ts index 5a52d2db5f7d..37bf4cbc043f 100644 --- a/src/aws.ts +++ b/src/aws.ts @@ -363,7 +363,7 @@ const completionSpec: Fig.Spec = { { name: "cloudformation", description: - "CloudFormation CloudFormation allows you to create and manage Amazon Web Services infrastructure deployments predictably and repeatedly. You can use CloudFormation to leverage Amazon Web Services products, such as Amazon Elastic Compute Cloud, Amazon Elastic Block Store, Amazon Simple Notification Service, Elastic Load Balancing, and Auto Scaling to build highly reliable, highly scalable, cost-effective applications without creating or configuring the underlying Amazon Web Services infrastructure. With CloudFormation, you declare all your resources and dependencies in a template file. The template defines a collection of resources as a single unit called a stack. CloudFormation creates and deletes all member resources of the stack together and manages all dependencies between the resources for you. For more information about CloudFormation, see the CloudFormation product page. CloudFormation makes use of other Amazon Web Services products. If you need additional technical information about a specific Amazon Web Services product, you can find the product's technical documentation at docs.aws.amazon.com", + "CloudFormation CloudFormation allows you to create and manage Amazon Web Services infrastructure deployments predictably and repeatedly. You can use CloudFormation to leverage Amazon Web Services products, such as Amazon Elastic Compute Cloud, Amazon Elastic Block Store, Amazon Simple Notification Service, Elastic Load Balancing, and Amazon EC2 Auto Scaling to build highly reliable, highly scalable, cost-effective applications without creating or configuring the underlying Amazon Web Services infrastructure. With CloudFormation, you declare all your resources and dependencies in a template file. 
The template defines a collection of resources as a single unit called a stack. CloudFormation creates and deletes all member resources of the stack together and manages all dependencies between the resources for you. For more information about CloudFormation, see the CloudFormation product page. CloudFormation makes use of other Amazon Web Services products. If you need additional technical information about a specific Amazon Web Services product, you can find the product's technical documentation at docs.aws.amazon.com", loadSpec: "aws/cloudformation", }, { @@ -471,7 +471,7 @@ const completionSpec: Fig.Spec = { { name: "codepipeline", description: - "CodePipeline Overview This is the CodePipeline API Reference. This guide provides descriptions of the actions and data types for CodePipeline. Some functionality for your pipeline can only be configured through the API. For more information, see the CodePipeline User Guide. You can use the CodePipeline API to work with pipelines, stages, actions, and transitions. Pipelines are models of automated release processes. Each pipeline is uniquely named, and consists of stages, actions, and transitions. You can work with pipelines by calling: CreatePipeline, which creates a uniquely named pipeline. DeletePipeline, which deletes the specified pipeline. GetPipeline, which returns information about the pipeline structure and pipeline metadata, including the pipeline Amazon Resource Name (ARN). GetPipelineExecution, which returns information about a specific execution of a pipeline. GetPipelineState, which returns information about the current state of the stages and actions of a pipeline. ListActionExecutions, which returns action-level details for past executions. The details include full stage and action-level details, including individual action duration, status, any errors that occurred during the execution, and input and output artifact location details. ListPipelines, which gets a summary of all of the pipelines associated with your account. ListPipelineExecutions, which gets a summary of the most recent executions for a pipeline. StartPipelineExecution, which runs the most recent revision of an artifact through the pipeline. StopPipelineExecution, which stops the specified pipeline execution from continuing through the pipeline. UpdatePipeline, which updates a pipeline with edits or changes to the structure of the pipeline. Pipelines include stages. Each stage contains one or more actions that must complete before the next stage begins. A stage results in success or failure. If a stage fails, the pipeline stops at that stage and remains stopped until either a new version of an artifact appears in the source location, or a user takes action to rerun the most recent artifact through the pipeline. You can call GetPipelineState, which displays the status of a pipeline, including the status of stages in the pipeline, or GetPipeline, which returns the entire structure of the pipeline, including the stages of that pipeline. For more information about the structure of stages and actions, see CodePipeline Pipeline Structure Reference. Pipeline stages include actions that are categorized into categories such as source or build actions performed in a stage of a pipeline. For example, you can use a source action to import artifacts into a pipeline from a source such as Amazon S3. 
Like stages, you do not work with actions directly in most cases, but you do define and interact with actions when working with pipeline operations such as CreatePipeline and GetPipelineState. Valid action categories are: Source Build Test Deploy Approval Invoke Pipelines also include transitions, which allow the transition of artifacts from one stage to the next in a pipeline after the actions in one stage complete. You can work with transitions by calling: DisableStageTransition, which prevents artifacts from transitioning to the next stage in a pipeline. EnableStageTransition, which enables transition of artifacts between stages in a pipeline. Using the API to integrate with CodePipeline For third-party integrators or developers who want to create their own integrations with CodePipeline, the expected sequence varies from the standard API user. To integrate with CodePipeline, developers need to work with the following items: Jobs, which are instances of an action. For example, a job for a source action might import a revision of an artifact from a source. You can work with jobs by calling: AcknowledgeJob, which confirms whether a job worker has received the specified job. GetJobDetails, which returns the details of a job. PollForJobs, which determines whether there are any jobs to act on. PutJobFailureResult, which provides details of a job failure. PutJobSuccessResult, which provides details of a job success. Third party jobs, which are instances of an action created by a partner action and integrated into CodePipeline. Partner actions are created by members of the Amazon Web Services Partner Network. You can work with third party jobs by calling: AcknowledgeThirdPartyJob, which confirms whether a job worker has received the specified job. GetThirdPartyJobDetails, which requests the details of a job for a partner action. PollForThirdPartyJobs, which determines whether there are any jobs to act on. PutThirdPartyJobFailureResult, which provides details of a job failure. PutThirdPartyJobSuccessResult, which provides details of a job success", + "CodePipeline Overview This is the CodePipeline API Reference. This guide provides descriptions of the actions and data types for CodePipeline. Some functionality for your pipeline can only be configured through the API. For more information, see the CodePipeline User Guide. You can use the CodePipeline API to work with pipelines, stages, actions, and transitions. Pipelines are models of automated release processes. Each pipeline is uniquely named, and consists of stages, actions, and transitions. You can work with pipelines by calling: CreatePipeline, which creates a uniquely named pipeline. DeletePipeline, which deletes the specified pipeline. GetPipeline, which returns information about the pipeline structure and pipeline metadata, including the pipeline Amazon Resource Name (ARN). GetPipelineExecution, which returns information about a specific execution of a pipeline. GetPipelineState, which returns information about the current state of the stages and actions of a pipeline. ListActionExecutions, which returns action-level details for past executions. The details include full stage and action-level details, including individual action duration, status, any errors that occurred during the execution, and input and output artifact location details. ListPipelines, which gets a summary of all of the pipelines associated with your account. ListPipelineExecutions, which gets a summary of the most recent executions for a pipeline. 
StartPipelineExecution, which runs the most recent revision of an artifact through the pipeline. StopPipelineExecution, which stops the specified pipeline execution from continuing through the pipeline. UpdatePipeline, which updates a pipeline with edits or changes to the structure of the pipeline. Pipelines include stages. Each stage contains one or more actions that must complete before the next stage begins. A stage results in success or failure. If a stage fails, the pipeline stops at that stage and remains stopped until either a new version of an artifact appears in the source location, or a user takes action to rerun the most recent artifact through the pipeline. You can call GetPipelineState, which displays the status of a pipeline, including the status of stages in the pipeline, or GetPipeline, which returns the entire structure of the pipeline, including the stages of that pipeline. For more information about the structure of stages and actions, see CodePipeline Pipeline Structure Reference. Pipeline stages include actions that are categorized into categories such as source or build actions performed in a stage of a pipeline. For example, you can use a source action to import artifacts into a pipeline from a source such as Amazon S3. Like stages, you do not work with actions directly in most cases, but you do define and interact with actions when working with pipeline operations such as CreatePipeline and GetPipelineState. Valid action categories are: Source Build Test Deploy Approval Invoke Compute Pipelines also include transitions, which allow the transition of artifacts from one stage to the next in a pipeline after the actions in one stage complete. You can work with transitions by calling: DisableStageTransition, which prevents artifacts from transitioning to the next stage in a pipeline. EnableStageTransition, which enables transition of artifacts between stages in a pipeline. Using the API to integrate with CodePipeline For third-party integrators or developers who want to create their own integrations with CodePipeline, the expected sequence varies from the standard API user. To integrate with CodePipeline, developers need to work with the following items: Jobs, which are instances of an action. For example, a job for a source action might import a revision of an artifact from a source. You can work with jobs by calling: AcknowledgeJob, which confirms whether a job worker has received the specified job. GetJobDetails, which returns the details of a job. PollForJobs, which determines whether there are any jobs to act on. PutJobFailureResult, which provides details of a job failure. PutJobSuccessResult, which provides details of a job success. Third party jobs, which are instances of an action created by a partner action and integrated into CodePipeline. Partner actions are created by members of the Amazon Web Services Partner Network. You can work with third party jobs by calling: AcknowledgeThirdPartyJob, which confirms whether a job worker has received the specified job. GetThirdPartyJobDetails, which requests the details of a job for a partner action. PollForThirdPartyJobs, which determines whether there are any jobs to act on. PutThirdPartyJobFailureResult, which provides details of a job failure. PutThirdPartyJobSuccessResult, which provides details of a job success", loadSpec: "aws/codepipeline", }, { @@ -763,7 +763,7 @@ const completionSpec: Fig.Spec = { { name: "elastic-inference", description: - "Elastic Inference public APIs. 
February 15, 2023: Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service", + "Amazon Elastic Inference is no longer available. Elastic Inference public APIs", loadSpec: "aws/elastic-inference", }, { @@ -1955,7 +1955,7 @@ const completionSpec: Fig.Spec = { { name: "securitylake", description: - "Amazon Security Lake is a fully managed security data lake service. You can use Security Lake to automatically centralize security data from cloud, on-premises, and custom sources into a data lake that's stored in your Amazon Web Services account. Amazon Web Services Organizations is an account management service that lets you consolidate multiple Amazon Web Services accounts into an organization that you create and centrally manage. With Organizations, you can create member accounts and invite existing accounts to join your organization. Security Lake helps you analyze security data for a more complete understanding of your security posture across the entire organization. It can also help you improve the protection of your workloads, applications, and data. The data lake is backed by Amazon Simple Storage Service (Amazon S3) buckets, and you retain ownership over your data. Amazon Security Lake integrates with CloudTrail, a service that provides a record of actions taken by a user, role, or an Amazon Web Services service. In Security Lake, CloudTrail captures API calls for Security Lake as events. The calls captured include calls from the Security Lake console and code calls to the Security Lake API operations. If you create a trail, you can enable continuous delivery of CloudTrail events to an Amazon S3 bucket, including events for Security Lake. If you don't configure a trail, you can still view the most recent events in the CloudTrail console in Event history. Using the information collected by CloudTrail you can determine the request that was made to Security Lake, the IP address from which the request was made, who made the request, when it was made, and additional details. To learn more about Security Lake information in CloudTrail, see the Amazon Security Lake User Guide. Security Lake automates the collection of security-related log and event data from integrated Amazon Web Services and third-party services. It also helps you manage the lifecycle of data with customizable retention and replication settings. Security Lake converts ingested data into Apache Parquet format and a standard open-source schema called the Open Cybersecurity Schema Framework (OCSF). Other Amazon Web Services and third-party services can subscribe to the data that's stored in Security Lake for incident response and security data analytics", + "Amazon Security Lake is a fully managed security data lake service. You can use Security Lake to automatically centralize security data from cloud, on-premises, and custom sources into a data lake that's stored in your Amazon Web Services account. 
Amazon Web Services Organizations is an account management service that lets you consolidate multiple Amazon Web Services accounts into an organization that you create and centrally manage. With Organizations, you can create member accounts and invite existing accounts to join your organization. Security Lake helps you analyze security data for a more complete understanding of your security posture across the entire organization. It can also help you improve the protection of your workloads, applications, and data. The data lake is backed by Amazon Simple Storage Service (Amazon S3) buckets, and you retain ownership over your data. Amazon Security Lake integrates with CloudTrail, a service that provides a record of actions taken by a user, role, or an Amazon Web Services service. In Security Lake, CloudTrail captures API calls for Security Lake as events. The calls captured include calls from the Security Lake console and code calls to the Security Lake API operations. If you create a trail, you can enable continuous delivery of CloudTrail events to an Amazon S3 bucket, including events for Security Lake. If you don't configure a trail, you can still view the most recent events in the CloudTrail console in Event history. Using the information collected by CloudTrail you can determine the request that was made to Security Lake, the IP address from which the request was made, who made the request, when it was made, and additional details. To learn more about Security Lake information in CloudTrail, see the Amazon Security Lake User Guide. Security Lake automates the collection of security-related log and event data from integrated Amazon Web Services services and third-party services. It also helps you manage the lifecycle of data with customizable retention and replication settings. Security Lake converts ingested data into Apache Parquet format and a standard open-source schema called the Open Cybersecurity Schema Framework (OCSF). Other Amazon Web Services services and third-party services can subscribe to the data that's stored in Security Lake for incident response and security data analytics", loadSpec: "aws/securitylake", }, { @@ -2046,6 +2046,12 @@ const completionSpec: Fig.Spec = { "Amazon Simple Notification Service Amazon Simple Notification Service (Amazon SNS) is a web service that enables you to build distributed web-enabled applications. Applications can use Amazon SNS to easily push real-time notification messages to interested subscribers over multiple delivery protocols. For more information about this product see the Amazon SNS product page. For detailed information about Amazon SNS features and their associated API calls, see the Amazon SNS Developer Guide. For information on the permissions you need to use this API, see Identity and access management in Amazon SNS in the Amazon SNS Developer Guide. We also provide SDKs that enable you to access Amazon SNS from your preferred programming language. The SDKs contain functionality that automatically takes care of tasks such as: cryptographically signing your service requests, retrying requests, and handling error responses. For a list of available SDKs, go to Tools for Amazon Web Services", loadSpec: "aws/sns", }, + { + name: "socialmessaging", + description: + "Amazon Web Services End User Messaging Social, also referred to as Social messaging, is a messaging service that enables application developers to incorporate WhatsApp into their existing workflows. 
The Amazon Web Services End User Messaging Social API provides information about the Amazon Web Services End User Messaging Social API resources, including supported HTTP methods, parameters, and schemas. The Amazon Web Services End User Messaging Social API provides programmatic access to options that are unique to the WhatsApp Business Platform. If you're new to the Amazon Web Services End User Messaging Social API, it's also helpful to review What is Amazon Web Services End User Messaging Social in the Amazon Web Services End User Messaging Social User Guide. The Amazon Web Services End User Messaging Social User Guide provides tutorials, code samples, and procedures that demonstrate how to use Amazon Web Services End User Messaging Social API features programmatically and how to integrate functionality into applications. The guide also provides key information, such as integration with other Amazon Web Services services, and the quotas that apply to use of the service. Regional availability The Amazon Web Services End User Messaging Social API is available across several Amazon Web Services Regions and it provides a dedicated endpoint for each of these Regions. For a list of all the Regions and endpoints where the API is currently available, see Amazon Web Services Service Endpoints and Amazon Web Services End User Messaging endpoints and quotas in the Amazon Web Services General Reference. To learn more about Amazon Web Services Regions, see Managing Amazon Web Services Regions in the Amazon Web Services General Reference. In each Region, Amazon Web Services maintains multiple Availability Zones. These Availability Zones are physically isolated from each other, but are united by private, low-latency, high-throughput, and highly redundant network connections. These Availability Zones enable us to provide very high levels of availability and redundancy, while also minimizing latency. To learn more about the number of Availability Zones that are available in each Region, see Amazon Web Services Global Infrastructure", + loadSpec: "aws/socialmessaging", + }, { name: "sqs", description: @@ -2192,7 +2198,7 @@ const completionSpec: Fig.Spec = { { name: "transfer", description: - "Transfer Family is a fully managed service that enables the transfer of files over the File Transfer Protocol (FTP), File Transfer Protocol over SSL (FTPS), or Secure Shell (SSH) File Transfer Protocol (SFTP) directly into and out of Amazon Simple Storage Service (Amazon S3) or Amazon EFS. Additionally, you can use Applicability Statement 2 (AS2) to transfer files into and out of Amazon S3. Amazon Web Services helps you seamlessly migrate your file transfer workflows to Transfer Family by integrating with existing authentication systems, and providing DNS routing with Amazon Route 53 so nothing changes for your customers and partners, or their applications. With your data in Amazon S3, you can use it with Amazon Web Services for processing, analytics, machine learning, and archiving. Getting started with Transfer Family is easy since there is no infrastructure to buy and set up", + "Transfer Family is a fully managed service that enables the transfer of files over the File Transfer Protocol (FTP), File Transfer Protocol over SSL (FTPS), or Secure Shell (SSH) File Transfer Protocol (SFTP) directly into and out of Amazon Simple Storage Service (Amazon S3) or Amazon EFS. Additionally, you can use Applicability Statement 2 (AS2) to transfer files into and out of Amazon S3. 
Amazon Web Services helps you seamlessly migrate your file transfer workflows to Transfer Family by integrating with existing authentication systems, and providing DNS routing with Amazon Route 53 so nothing changes for your customers and partners, or their applications. With your data in Amazon S3, you can use it with Amazon Web Services services for processing, analytics, machine learning, and archiving. Getting started with Transfer Family is easy since there is no infrastructure to buy and set up", loadSpec: "aws/transfer", }, { @@ -2239,7 +2245,7 @@ const completionSpec: Fig.Spec = { { name: "wafv2", description: - 'WAF This is the latest version of the WAF API, released in November, 2019. The names of the entities that you use to access this API, like endpoints and namespaces, all have the versioning information added, like "V2" or "v2", to distinguish from the prior version. We recommend migrating your resources to this version, because it has a number of significant improvements. If you used WAF prior to this release, you can\'t use this WAFV2 API to access any WAF resources that you created before. You can access your old rules, web ACLs, and other WAF resources only through the WAF Classic APIs. The WAF Classic APIs have retained the prior names, endpoints, and namespaces. For information, including how to migrate your WAF resources to this version, see the WAF Developer Guide. WAF is a web application firewall that lets you monitor the HTTP and HTTPS requests that are forwarded to an Amazon CloudFront distribution, Amazon API Gateway REST API, Application Load Balancer, AppSync GraphQL API, Amazon Cognito user pool, App Runner service, or Amazon Web Services Verified Access instance. WAF also lets you control access to your content, to protect the Amazon Web Services resource that WAF is monitoring. Based on conditions that you specify, such as the IP addresses that requests originate from or the values of query strings, the protected resource responds to requests with either the requested content, an HTTP 403 status code (Forbidden), or with a custom response. This API guide is for developers who need detailed information about WAF API actions, data types, and errors. For detailed information about WAF features and guidance for configuring and using WAF, see the WAF Developer Guide. You can make calls using the endpoints listed in WAF endpoints and quotas. For regional applications, you can use any of the endpoints in the list. A regional application can be an Application Load Balancer (ALB), an Amazon API Gateway REST API, an AppSync GraphQL API, an Amazon Cognito user pool, an App Runner service, or an Amazon Web Services Verified Access instance. For Amazon CloudFront applications, you must use the API endpoint listed for US East (N. Virginia): us-east-1. Alternatively, you can use one of the Amazon Web Services SDKs to access an API that\'s tailored to the programming language or platform that you\'re using. For more information, see Amazon Web Services SDKs. We currently provide two versions of the WAF API: this API and the prior versions, the classic WAF APIs. This new API provides the same functionality as the older versions, with the following major improvements: You use one API for both global and regional applications. Where you need to distinguish the scope, you specify a Scope parameter and set it to CLOUDFRONT or REGIONAL. You can define a web ACL or rule group with a single call, and update it with a single call. 
You define all rule specifications in JSON format, and pass them to your rule group or web ACL calls. The limits WAF places on the use of rules more closely reflects the cost of running each type of rule. Rule groups include capacity settings, so you know the maximum cost of a rule group when you use it', + 'WAF This is the latest version of the WAF API, released in November, 2019. The names of the entities that you use to access this API, like endpoints and namespaces, all have the versioning information added, like "V2" or "v2", to distinguish from the prior version. We recommend migrating your resources to this version, because it has a number of significant improvements. If you used WAF prior to this release, you can\'t use this WAFV2 API to access any WAF resources that you created before. WAF Classic support will end on September 30, 2025. For information about WAF, including how to migrate your WAF Classic resources to this version, see the WAF Developer Guide. WAF is a web application firewall that lets you monitor the HTTP and HTTPS requests that are forwarded to an Amazon CloudFront distribution, Amazon API Gateway REST API, Application Load Balancer, AppSync GraphQL API, Amazon Cognito user pool, App Runner service, or Amazon Web Services Verified Access instance. WAF also lets you control access to your content, to protect the Amazon Web Services resource that WAF is monitoring. Based on conditions that you specify, such as the IP addresses that requests originate from or the values of query strings, the protected resource responds to requests with either the requested content, an HTTP 403 status code (Forbidden), or with a custom response. This API guide is for developers who need detailed information about WAF API actions, data types, and errors. For detailed information about WAF features and guidance for configuring and using WAF, see the WAF Developer Guide. You can make calls using the endpoints listed in WAF endpoints and quotas. For regional applications, you can use any of the endpoints in the list. A regional application can be an Application Load Balancer (ALB), an Amazon API Gateway REST API, an AppSync GraphQL API, an Amazon Cognito user pool, an App Runner service, or an Amazon Web Services Verified Access instance. For Amazon CloudFront applications, you must use the API endpoint listed for US East (N. Virginia): us-east-1. Alternatively, you can use one of the Amazon Web Services SDKs to access an API that\'s tailored to the programming language or platform that you\'re using. For more information, see Amazon Web Services SDKs', loadSpec: "aws/wafv2", }, { diff --git a/src/aws/acm-pca.ts b/src/aws/acm-pca.ts index a921497e22da..ce7e5e787959 100644 --- a/src/aws/acm-pca.ts +++ b/src/aws/acm-pca.ts @@ -19,7 +19,7 @@ const completionSpec: Fig.Spec = { { name: "--revocation-configuration", description: - 'Contains information to enable Online Certificate Status Protocol (OCSP) support, to enable a certificate revocation list (CRL), to enable both, or to enable neither. The default is for both certificate validation mechanisms to be disabled. The following requirements apply to revocation configurations. A configuration disabling CRLs or OCSP must contain only the Enabled=False parameter, and will fail if other parameters such as CustomCname or ExpirationInDays are included. In a CRL configuration, the S3BucketName parameter must conform to Amazon S3 bucket naming rules. 
A configuration containing a custom Canonical Name (CNAME) parameter for CRLs or OCSP must conform to RFC2396 restrictions on the use of special characters in a CNAME. In a CRL or OCSP configuration, the value of a CNAME parameter must not include a protocol prefix such as "http://" or "https://". For more information, see the OcspConfiguration and CrlConfiguration types', + 'Contains information to enable support for Online Certificate Status Protocol (OCSP), certificate revocation list (CRL), both protocols, or neither. By default, both certificate validation mechanisms are disabled. The following requirements apply to revocation configurations. A configuration disabling CRLs or OCSP must contain only the Enabled=False parameter, and will fail if other parameters such as CustomCname or ExpirationInDays are included. In a CRL configuration, the S3BucketName parameter must conform to Amazon S3 bucket naming rules. A configuration containing a custom Canonical Name (CNAME) parameter for CRLs or OCSP must conform to RFC2396 restrictions on the use of special characters in a CNAME. In a CRL or OCSP configuration, the value of a CNAME parameter must not include a protocol prefix such as "http://" or "https://". For more information, see the OcspConfiguration and CrlConfiguration types', args: { name: "structure", }, @@ -85,7 +85,7 @@ const completionSpec: Fig.Spec = { { name: "create-certificate-authority-audit-report", description: - "Creates an audit report that lists every time that your CA private key is used. The report is saved in the Amazon S3 bucket that you specify on input. The IssueCertificate and RevokeCertificate actions use the private key. Both Amazon Web Services Private CA and the IAM principal must have permission to write to the S3 bucket that you specify. If the IAM principal making the call does not have permission to write to the bucket, then an exception is thrown. For more information, see Access policies for CRLs in Amazon S3. Amazon Web Services Private CA assets that are stored in Amazon S3 can be protected with encryption. For more information, see Encrypting Your Audit Reports. You can generate a maximum of one report every 30 minutes", + "Creates an audit report that lists every time that your CA private key is used to issue a certificate. The IssueCertificate and RevokeCertificate actions use the private key. To save the audit report to your designated Amazon S3 bucket, you must create a bucket policy that grants Amazon Web Services Private CA permission to access and write to it. For an example policy, see Prepare an Amazon S3 bucket for audit reports. Amazon Web Services Private CA assets that are stored in Amazon S3 can be protected with encryption. For more information, see Encrypting Your Audit Reports. You can generate a maximum of one report every 30 minutes", options: [ { name: "--certificate-authority-arn", @@ -515,7 +515,7 @@ const completionSpec: Fig.Spec = { { name: "import-certificate-authority-certificate", description: - "Imports a signed private CA certificate into Amazon Web Services Private CA. This action is used when you are using a chain of trust whose root is located outside Amazon Web Services Private CA. Before you can call this action, the following preparations must in place: In Amazon Web Services Private CA, call the CreateCertificateAuthority action to create the private CA that you plan to back with the imported certificate. Call the GetCertificateAuthorityCsr action to generate a certificate signing request (CSR). 
Sign the CSR using a root or intermediate CA hosted by either an on-premises PKI hierarchy or by a commercial CA. Create a certificate chain and copy the signed certificate and the certificate chain to your working directory. Amazon Web Services Private CA supports three scenarios for installing a CA certificate: Installing a certificate for a root CA hosted by Amazon Web Services Private CA. Installing a subordinate CA certificate whose parent authority is hosted by Amazon Web Services Private CA. Installing a subordinate CA certificate whose parent authority is externally hosted. The following additional requirements apply when you import a CA certificate. Only a self-signed certificate can be imported as a root CA. A self-signed certificate cannot be imported as a subordinate CA. Your certificate chain must not include the private CA certificate that you are importing. Your root CA must be the last certificate in your chain. The subordinate certificate, if any, that your root CA signed must be next to last. The subordinate certificate signed by the preceding subordinate CA must come next, and so on until your chain is built. The chain must be PEM-encoded. The maximum allowed size of a certificate is 32 KB. The maximum allowed size of a certificate chain is 2 MB. Enforcement of Critical Constraints Amazon Web Services Private CA allows the following extensions to be marked critical in the imported CA certificate or chain. Basic constraints (must be marked critical) Subject alternative names Key usage Extended key usage Authority key identifier Subject key identifier Issuer alternative name Subject directory attributes Subject information access Certificate policies Policy mappings Inhibit anyPolicy Amazon Web Services Private CA rejects the following extensions when they are marked critical in an imported CA certificate or chain. Name constraints Policy constraints CRL distribution points Authority information access Freshest CRL Any other extension", + "Imports a signed private CA certificate into Amazon Web Services Private CA. This action is used when you are using a chain of trust whose root is located outside Amazon Web Services Private CA. Before you can call this action, the following preparations must in place: In Amazon Web Services Private CA, call the CreateCertificateAuthority action to create the private CA that you plan to back with the imported certificate. Call the GetCertificateAuthorityCsr action to generate a certificate signing request (CSR). Sign the CSR using a root or intermediate CA hosted by either an on-premises PKI hierarchy or by a commercial CA. Create a certificate chain and copy the signed certificate and the certificate chain to your working directory. Amazon Web Services Private CA supports three scenarios for installing a CA certificate: Installing a certificate for a root CA hosted by Amazon Web Services Private CA. Installing a subordinate CA certificate whose parent authority is hosted by Amazon Web Services Private CA. Installing a subordinate CA certificate whose parent authority is externally hosted. The following additional requirements apply when you import a CA certificate. Only a self-signed certificate can be imported as a root CA. A self-signed certificate cannot be imported as a subordinate CA. Your certificate chain must not include the private CA certificate that you are importing. Your root CA must be the last certificate in your chain. The subordinate certificate, if any, that your root CA signed must be next to last. 
The subordinate certificate signed by the preceding subordinate CA must come next, and so on until your chain is built. The chain must be PEM-encoded. The maximum allowed size of a certificate is 32 KB. The maximum allowed size of a certificate chain is 2 MB. Enforcement of Critical Constraints Amazon Web Services Private CA allows the following extensions to be marked critical in the imported CA certificate or chain. Authority key identifier Basic constraints (must be marked critical) Certificate policies Extended key usage Inhibit anyPolicy Issuer alternative name Key usage Name constraints Policy mappings Subject alternative name Subject directory attributes Subject key identifier Subject information access Amazon Web Services Private CA rejects the following extensions when they are marked critical in an imported CA certificate or chain. Authority information access CRL distribution points Freshest CRL Policy constraints Amazon Web Services Private Certificate Authority will also reject any other extension marked as critical not contained on the preceding list of allowed extensions", options: [ { name: "--certificate-authority-arn", @@ -1077,7 +1077,7 @@ const completionSpec: Fig.Spec = { { name: "--revocation-configuration", description: - 'Contains information to enable Online Certificate Status Protocol (OCSP) support, to enable a certificate revocation list (CRL), to enable both, or to enable neither. If this parameter is not supplied, existing capibilites remain unchanged. For more information, see the OcspConfiguration and CrlConfiguration types. The following requirements apply to revocation configurations. A configuration disabling CRLs or OCSP must contain only the Enabled=False parameter, and will fail if other parameters such as CustomCname or ExpirationInDays are included. In a CRL configuration, the S3BucketName parameter must conform to Amazon S3 bucket naming rules. A configuration containing a custom Canonical Name (CNAME) parameter for CRLs or OCSP must conform to RFC2396 restrictions on the use of special characters in a CNAME. In a CRL or OCSP configuration, the value of a CNAME parameter must not include a protocol prefix such as "http://" or "https://"', + 'Contains information to enable support for Online Certificate Status Protocol (OCSP), certificate revocation list (CRL), both protocols, or neither. If you don\'t supply this parameter, existing capibilites remain unchanged. For more information, see the OcspConfiguration and CrlConfiguration types. The following requirements apply to revocation configurations. A configuration disabling CRLs or OCSP must contain only the Enabled=False parameter, and will fail if other parameters such as CustomCname or ExpirationInDays are included. In a CRL configuration, the S3BucketName parameter must conform to Amazon S3 bucket naming rules. A configuration containing a custom Canonical Name (CNAME) parameter for CRLs or OCSP must conform to RFC2396 restrictions on the use of special characters in a CNAME. In a CRL or OCSP configuration, the value of a CNAME parameter must not include a protocol prefix such as "http://" or "https://". If you update the S3BucketName of CrlConfiguration, you can break revocation for existing certificates. In other words, if you call UpdateCertificateAuthority to update the CRL configuration\'s S3 bucket name, Amazon Web Services Private CA only writes CRLs to the new S3 bucket. 
Certificates issued prior to this point will have the old S3 bucket name in your CRL Distribution Point (CDP) extension, essentially breaking revocation. If you must update the S3 bucket, you\'ll need to reissue old certificates to keep the revocation working. Alternatively, you can use a CustomCname in your CRL configuration if you might need to change the S3 bucket name in the future', args: { name: "structure", }, diff --git a/src/aws/amplify.ts b/src/aws/amplify.ts index f870551bfd5f..c8bb89182ea3 100644 --- a/src/aws/amplify.ts +++ b/src/aws/amplify.ts @@ -752,7 +752,7 @@ const completionSpec: Fig.Spec = { { name: "create-deployment", description: - "Creates a deployment for a manually deployed Amplify app. Manually deployed apps are not connected to a repository. The maximum duration between the CreateDeployment call and the StartDeployment call cannot exceed 8 hours. If the duration exceeds 8 hours, the StartDeployment call and the associated Job will fail", + "Creates a deployment for a manually deployed Amplify app. Manually deployed apps are not connected to a Git repository. The maximum duration between the CreateDeployment call and the StartDeployment call cannot exceed 8 hours. If the duration exceeds 8 hours, the StartDeployment call and the associated Job will fail", options: [ { name: "--app-id", @@ -1979,7 +1979,7 @@ const completionSpec: Fig.Spec = { { name: "start-deployment", description: - "Starts a deployment for a manually deployed app. Manually deployed apps are not connected to a repository. The maximum duration between the CreateDeployment call and the StartDeployment call cannot exceed 8 hours. If the duration exceeds 8 hours, the StartDeployment call and the associated Job will fail", + "Starts a deployment for a manually deployed app. Manually deployed apps are not connected to a Git repository. The maximum duration between the CreateDeployment call and the StartDeployment call cannot exceed 8 hours. If the duration exceeds 8 hours, the StartDeployment call and the associated Job will fail", options: [ { name: "--app-id", @@ -1991,7 +1991,7 @@ const completionSpec: Fig.Spec = { }, { name: "--branch-name", - description: "The name of the branch to use for the job", + description: "The name of the branch to use for the deployment job", args: { name: "string", generators: generators.listBranchNames, @@ -2000,7 +2000,7 @@ const completionSpec: Fig.Spec = { { name: "--job-id", description: - "The job ID for this deployment, generated by the create deployment request", + "The job ID for this deployment that is generated by the CreateDeployment request", args: { name: "string", generators: generators.listJobIds, @@ -2009,7 +2009,15 @@ const completionSpec: Fig.Spec = { { name: "--source-url", description: - "The source URL for this deployment, used when calling start deployment without create deployment. The source URL can be any HTTP GET URL that is publicly accessible and downloads a single .zip file", + "The source URL for the deployment that is used when calling StartDeployment without CreateDeployment. The source URL can be either an HTTP GET URL that is publicly accessible and downloads a single .zip file, or an Amazon S3 bucket and prefix", + args: { + name: "string", + }, + }, + { + name: "--source-url-type", + description: + "The type of source specified by the sourceURL. If the value is ZIP, the source is a .zip file. If the value is BUCKET_PREFIX, the source is an Amazon S3 bucket and prefix. 
If no value is specified, the default is ZIP", args: { name: "string", }, diff --git a/src/aws/application-insights.ts b/src/aws/application-insights.ts index 6095e5857366..a17d58594afc 100644 --- a/src/aws/application-insights.ts +++ b/src/aws/application-insights.ts @@ -88,6 +88,13 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--sns-notification-arn", + description: "The SNS notification topic ARN", + args: { + name: "string", + }, + }, { name: "--tags", description: @@ -234,7 +241,7 @@ const completionSpec: Fig.Spec = { { name: "--rank", description: - "Rank of the log pattern. Must be a value between 1 and 1,000,000. The patterns are sorted by rank, so we recommend that you set your highest priority patterns with the lowest rank. A pattern of rank 1 will be the first to get matched to a log line. A pattern of rank 1,000,000 will be last to get matched. When you configure custom log patterns from the console, a Low severity pattern translates to a 750,000 rank. A Medium severity pattern translates to a 500,000 rank. And a High severity pattern translates to a 250,000 rank. Rank values less than 1 or greater than 1,000,000 are reserved for AWS-provided patterns", + "Rank of the log pattern. Must be a value between 1 and 1,000,000. The patterns are sorted by rank, so we recommend that you set your highest priority patterns with the lowest rank. A pattern of rank 1 will be the first to get matched to a log line. A pattern of rank 1,000,000 will be last to get matched. When you configure custom log patterns from the console, a Low severity pattern translates to a 750,000 rank. A Medium severity pattern translates to a 500,000 rank. And a High severity pattern translates to a 250,000 rank. Rank values less than 1 or greater than 1,000,000 are reserved for Amazon Web Services provided patterns", args: { name: "integer", }, @@ -384,7 +391,8 @@ const completionSpec: Fig.Spec = { }, { name: "--account-id", - description: "The AWS account ID for the resource group owner", + description: + "The Amazon Web Services account ID for the resource group owner", args: { name: "string", }, @@ -429,7 +437,8 @@ const completionSpec: Fig.Spec = { }, { name: "--account-id", - description: "The AWS account ID for the resource group owner", + description: + "The Amazon Web Services account ID for the resource group owner", args: { name: "string", }, @@ -473,7 +482,8 @@ const completionSpec: Fig.Spec = { }, { name: "--account-id", - description: "The AWS account ID for the resource group owner", + description: + "The Amazon Web Services account ID for the resource group owner", args: { name: "string", }, @@ -525,7 +535,8 @@ const completionSpec: Fig.Spec = { }, { name: "--workload-name", - description: "The name of the workload", + description: + "The name of the workload. 
The name of the workload is required when the tier of the application component is SAP_ASE_SINGLE_NODE or SAP_ASE_HIGH_AVAILABILITY", args: { name: "string", }, @@ -583,7 +594,8 @@ const completionSpec: Fig.Spec = { }, { name: "--account-id", - description: "The AWS account ID for the resource group owner", + description: + "The Amazon Web Services account ID for the resource group owner", args: { name: "string", }, @@ -620,7 +632,8 @@ const completionSpec: Fig.Spec = { }, { name: "--account-id", - description: "The AWS account ID for the resource group owner", + description: + "The Amazon Web Services account ID for the resource group owner", args: { name: "string", }, @@ -658,7 +671,7 @@ const completionSpec: Fig.Spec = { { name: "--account-id", description: - "The AWS account ID for the owner of the resource group affected by the problem", + "The Amazon Web Services account ID for the owner of the resource group affected by the problem", args: { name: "string", }, @@ -696,7 +709,8 @@ const completionSpec: Fig.Spec = { }, { name: "--account-id", - description: "The AWS account ID for the resource group owner", + description: + "The Amazon Web Services account ID for the resource group owner", args: { name: "string", }, @@ -747,7 +761,8 @@ const completionSpec: Fig.Spec = { }, { name: "--account-id", - description: "The AWS account ID for the workload owner", + description: + "The Amazon Web Services account ID for the workload owner", args: { name: "string", }, @@ -792,7 +807,8 @@ const completionSpec: Fig.Spec = { }, { name: "--account-id", - description: "The AWS account ID for the resource group owner", + description: + "The Amazon Web Services account ID for the resource group owner", args: { name: "string", }, @@ -845,7 +861,8 @@ const completionSpec: Fig.Spec = { }, { name: "--account-id", - description: "The AWS account ID for the resource group owner", + description: + "The Amazon Web Services account ID for the resource group owner", args: { name: "string", }, @@ -921,7 +938,8 @@ const completionSpec: Fig.Spec = { }, { name: "--account-id", - description: "The AWS account ID for the resource group owner", + description: + "The Amazon Web Services account ID for the resource group owner", args: { name: "string", }, @@ -973,7 +991,8 @@ const completionSpec: Fig.Spec = { }, { name: "--account-id", - description: "The AWS account ID for the resource group owner", + description: + "The Amazon Web Services account ID for the resource group owner", args: { name: "string", }, @@ -1032,7 +1051,8 @@ const completionSpec: Fig.Spec = { }, { name: "--account-id", - description: "The AWS account ID for the resource group owner", + description: + "The Amazon Web Services account ID for the resource group owner", args: { name: "string", }, @@ -1062,7 +1082,8 @@ const completionSpec: Fig.Spec = { options: [ { name: "--account-id", - description: "The AWS account ID for the resource group owner", + description: + "The Amazon Web Services account ID for the resource group owner", args: { name: "string", }, @@ -1207,7 +1228,8 @@ const completionSpec: Fig.Spec = { }, { name: "--account-id", - description: "The AWS account ID of the owner of the workload", + description: + "The Amazon Web Services account ID of the owner of the workload", args: { name: "string", }, @@ -1394,6 +1416,14 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--sns-notification-arn", + description: + "The SNS topic ARN. 
Allows you to receive SNS notifications for updates and issues with an application", + args: { + name: "string", + }, + }, { name: "--remove-sns-topic", description: @@ -1602,7 +1632,7 @@ const completionSpec: Fig.Spec = { { name: "--rank", description: - "Rank of the log pattern. Must be a value between 1 and 1,000,000. The patterns are sorted by rank, so we recommend that you set your highest priority patterns with the lowest rank. A pattern of rank 1 will be the first to get matched to a log line. A pattern of rank 1,000,000 will be last to get matched. When you configure custom log patterns from the console, a Low severity pattern translates to a 750,000 rank. A Medium severity pattern translates to a 500,000 rank. And a High severity pattern translates to a 250,000 rank. Rank values less than 1 or greater than 1,000,000 are reserved for AWS-provided patterns", + "Rank of the log pattern. Must be a value between 1 and 1,000,000. The patterns are sorted by rank, so we recommend that you set your highest priority patterns with the lowest rank. A pattern of rank 1 will be the first to get matched to a log line. A pattern of rank 1,000,000 will be last to get matched. When you configure custom log patterns from the console, a Low severity pattern translates to a 750,000 rank. A Medium severity pattern translates to a 500,000 rank. And a High severity pattern translates to a 250,000 rank. Rank values less than 1 or greater than 1,000,000 are reserved for Amazon Web Services provided patterns", args: { name: "integer", }, diff --git a/src/aws/athena.ts b/src/aws/athena.ts index d8091f21993d..906fac31d522 100644 --- a/src/aws/athena.ts +++ b/src/aws/athena.ts @@ -196,7 +196,7 @@ const completionSpec: Fig.Spec = { { name: "--type", description: - "The type of data catalog to create: LAMBDA for a federated catalog, GLUE for an Glue Data Catalog, and HIVE for an external Apache Hive metastore. FEDERATED is a federated catalog for which Athena creates the connection and the Lambda function for you based on the parameters that you pass", + "The type of data catalog to create: LAMBDA for a federated catalog, HIVE for an external hive metastore, or GLUE for an Glue Data Catalog", args: { name: "string", }, @@ -211,7 +211,7 @@ const completionSpec: Fig.Spec = { { name: "--parameters", description: - 'Specifies the Lambda function or functions to use for creating the data catalog. This is a mapping whose values depend on the catalog type. For the HIVE data catalog type, use the following syntax. The metadata-function parameter is required. The sdk-version parameter is optional and defaults to the currently supported version. metadata-function=lambda_arn, sdk-version=version_number For the LAMBDA data catalog type, use one of the following sets of required parameters, but not both. If you have one Lambda function that processes metadata and another for reading the actual data, use the following syntax. Both parameters are required. metadata-function=lambda_arn, record-function=lambda_arn If you have a composite Lambda function that processes both metadata and data, use the following syntax to specify your Lambda function. function=lambda_arn The GLUE type takes a catalog ID parameter and is required. The catalog_id is the account ID of the Amazon Web Services account to which the Glue Data Catalog belongs. catalog-id=catalog_id The GLUE data catalog type also applies to the default AwsDataCatalog that already exists in your account, of which you can have only one and cannot modify. 
The FEDERATED data catalog type uses one of the following parameters, but not both. Use connection-arn for an existing Glue connection. Use connection-type and connection-properties to specify the configuration setting for a new connection. connection-arn: lambda-role-arn (optional): The execution role to use for the Lambda function. If not provided, one is created. connection-type:MYSQL|REDSHIFT|...., connection-properties:"" For , use escaped JSON text, as in the following example. "{\\"spill_bucket\\":\\"my_spill\\",\\"spill_prefix\\":\\"athena-spill\\",\\"host\\":\\"abc12345.snowflakecomputing.com\\",\\"port\\":\\"1234\\",\\"warehouse\\":\\"DEV_WH\\",\\"database\\":\\"TEST\\",\\"schema\\":\\"PUBLIC\\",\\"SecretArn\\":\\"arn:aws:secretsmanager:ap-south-1:111122223333:secret:snowflake-XHb67j\\"}"', + "Specifies the Lambda function or functions to use for creating the data catalog. This is a mapping whose values depend on the catalog type. For the HIVE data catalog type, use the following syntax. The metadata-function parameter is required. The sdk-version parameter is optional and defaults to the currently supported version. metadata-function=lambda_arn, sdk-version=version_number For the LAMBDA data catalog type, use one of the following sets of required parameters, but not both. If you have one Lambda function that processes metadata and another for reading the actual data, use the following syntax. Both parameters are required. metadata-function=lambda_arn, record-function=lambda_arn If you have a composite Lambda function that processes both metadata and data, use the following syntax to specify your Lambda function. function=lambda_arn The GLUE type takes a catalog ID parameter and is required. The catalog_id is the account ID of the Amazon Web Services account to which the Glue Data Catalog belongs. catalog-id=catalog_id The GLUE data catalog type also applies to the default AwsDataCatalog that already exists in your account, of which you can have only one and cannot modify", args: { name: "map", }, diff --git a/src/aws/autoscaling.ts b/src/aws/autoscaling.ts index 5e0b6f49b583..9175b71f2ebc 100644 --- a/src/aws/autoscaling.ts +++ b/src/aws/autoscaling.ts @@ -84,7 +84,7 @@ const completionSpec: Fig.Spec = { { name: "attach-load-balancers", description: - "This API operation is superseded by AttachTrafficSources, which can attach multiple traffic sources types. We recommend using AttachTrafficSources to simplify how you manage traffic sources. However, we continue to support AttachLoadBalancers. You can use both the original AttachLoadBalancers API operation and AttachTrafficSources on the same Auto Scaling group. Attaches one or more Classic Load Balancers to the specified Auto Scaling group. Amazon EC2 Auto Scaling registers the running instances with these Classic Load Balancers. To describe the load balancers for an Auto Scaling group, call the DescribeLoadBalancers API. To detach a load balancer from the Auto Scaling group, call the DetachLoadBalancers API. This operation is additive and does not detach existing Classic Load Balancers or target groups from the Auto Scaling group. For more information, see Use Elastic Load Balancing to distribute traffic across the instances in your Auto Scaling group in the Amazon EC2 Auto Scaling User Guide", + "This API operation is superseded by https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_AttachTrafficSources.html, which can attach multiple traffic sources types. 
We recommend using AttachTrafficSources to simplify how you manage traffic sources. However, we continue to support AttachLoadBalancers. You can use both the original AttachLoadBalancers API operation and AttachTrafficSources on the same Auto Scaling group. Attaches one or more Classic Load Balancers to the specified Auto Scaling group. Amazon EC2 Auto Scaling registers the running instances with these Classic Load Balancers. To describe the load balancers for an Auto Scaling group, call the DescribeLoadBalancers API. To detach a load balancer from the Auto Scaling group, call the DetachLoadBalancers API. This operation is additive and does not detach existing Classic Load Balancers or target groups from the Auto Scaling group. For more information, see Use Elastic Load Balancing to distribute traffic across the instances in your Auto Scaling group in the Amazon EC2 Auto Scaling User Guide", options: [ { name: "--auto-scaling-group-name", @@ -3634,7 +3634,7 @@ const completionSpec: Fig.Spec = { { name: "--placement-group", description: - "The name of an existing placement group into which to launch your instances. For more information, see Placement groups in the Amazon EC2 User Guide for Linux Instances. A cluster placement group is a logical grouping of instances within a single Availability Zone. You cannot specify multiple Availability Zones and a cluster placement group", + "The name of an existing placement group into which to launch your instances. To remove the placement group setting, pass an empty string for placement-group. For more information about placement groups, see Placement groups in the Amazon EC2 User Guide for Linux Instances. A cluster placement group is a logical grouping of instances within a single Availability Zone. You cannot specify multiple Availability Zones and a cluster placement group", args: { name: "string", }, diff --git a/src/aws/bedrock-agent.ts b/src/aws/bedrock-agent.ts index ac6a25d3e1a5..75d4afe7a957 100644 --- a/src/aws/bedrock-agent.ts +++ b/src/aws/bedrock-agent.ts @@ -1661,7 +1661,7 @@ const completionSpec: Fig.Spec = { { name: "get-ingestion-job", description: - "Gets information about a data ingestion job. Data sources are ingested into your knowledge base so that Large Lanaguage Models (LLMs) can use your data", + "Gets information about a data ingestion job. Data sources are ingested into your knowledge base so that Large Language Models (LLMs) can use your data", options: [ { name: "--data-source-id", diff --git a/src/aws/cloudformation.ts b/src/aws/cloudformation.ts index 5b4680798283..cf2ce689d2b1 100644 --- a/src/aws/cloudformation.ts +++ b/src/aws/cloudformation.ts @@ -491,7 +491,7 @@ const generators: Record = { const completionSpec: Fig.Spec = { name: "cloudformation", description: - "CloudFormation CloudFormation allows you to create and manage Amazon Web Services infrastructure deployments predictably and repeatedly. You can use CloudFormation to leverage Amazon Web Services products, such as Amazon Elastic Compute Cloud, Amazon Elastic Block Store, Amazon Simple Notification Service, Elastic Load Balancing, and Auto Scaling to build highly reliable, highly scalable, cost-effective applications without creating or configuring the underlying Amazon Web Services infrastructure. With CloudFormation, you declare all your resources and dependencies in a template file. The template defines a collection of resources as a single unit called a stack. 
CloudFormation creates and deletes all member resources of the stack together and manages all dependencies between the resources for you. For more information about CloudFormation, see the CloudFormation product page. CloudFormation makes use of other Amazon Web Services products. If you need additional technical information about a specific Amazon Web Services product, you can find the product's technical documentation at docs.aws.amazon.com", + "CloudFormation CloudFormation allows you to create and manage Amazon Web Services infrastructure deployments predictably and repeatedly. You can use CloudFormation to leverage Amazon Web Services products, such as Amazon Elastic Compute Cloud, Amazon Elastic Block Store, Amazon Simple Notification Service, Elastic Load Balancing, and Amazon EC2 Auto Scaling to build highly reliable, highly scalable, cost-effective applications without creating or configuring the underlying Amazon Web Services infrastructure. With CloudFormation, you declare all your resources and dependencies in a template file. The template defines a collection of resources as a single unit called a stack. CloudFormation creates and deletes all member resources of the stack together and manages all dependencies between the resources for you. For more information about CloudFormation, see the CloudFormation product page. CloudFormation makes use of other Amazon Web Services products. If you need additional technical information about a specific Amazon Web Services product, you can find the product's technical documentation at docs.aws.amazon.com", subcommands: [ { name: "activate-organizations-access", @@ -520,7 +520,7 @@ const completionSpec: Fig.Spec = { { name: "activate-type", description: - "Activates a public third-party extension, making it available for use in stack templates. For more information, see Using public extensions in the CloudFormation User Guide. Once you have activated a public third-party extension in your account and Region, use SetTypeConfiguration to specify configuration properties for the extension. For more information, see Configuring extensions at the account level in the CloudFormation User Guide", + "Activates a public third-party extension, making it available for use in stack templates. Once you have activated a public third-party extension in your account and Region, use SetTypeConfiguration to specify configuration properties for the extension. For more information, see Using public extensions in the CloudFormation User Guide", options: [ { name: "--type", @@ -626,7 +626,7 @@ const completionSpec: Fig.Spec = { { name: "batch-describe-type-configurations", description: - "Returns configuration data for the specified CloudFormation extensions, from the CloudFormation registry for the account and Region. For more information, see Configuring extensions at the account level in the CloudFormation User Guide", + "Returns configuration data for the specified CloudFormation extensions, from the CloudFormation registry for the account and Region. For more information, see Edit configuration data for extensions in your account in the CloudFormation User Guide", options: [ { name: "--type-configuration-identifiers", @@ -700,7 +700,7 @@ const completionSpec: Fig.Spec = { { name: "continue-update-rollback", description: - "For a specified stack that's in the UPDATE_ROLLBACK_FAILED state, continues rolling it back to the UPDATE_ROLLBACK_COMPLETE state. Depending on the cause of the failure, you can manually fix the error and continue the rollback. 
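The cloudformation spec above refers to generators such as generators.listRoleArns and generators.listSNSTopics. As a rough sketch only (the actual implementations live elsewhere in cloudformation.ts and may differ), a Fig generator of this kind typically runs an AWS CLI command and maps its output to suggestions:

// Hypothetical sketch of a role-ARN generator in the Fig spec style; the real
// generators.listRoleArns may be implemented differently.
const listRoleArnsSketch: Fig.Generator = {
  script: ["aws", "iam", "list-roles", "--query", "Roles[].Arn", "--output", "text"],
  postProcess: (out) =>
    out
      .split(/\s+/)
      .filter(Boolean)
      .map((arn) => ({ name: arn, description: "IAM role ARN" })),
};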
By continuing the rollback, you can return your stack to a working state (the UPDATE_ROLLBACK_COMPLETE state), and then try to update the stack again. A stack goes into the UPDATE_ROLLBACK_FAILED state when CloudFormation can't roll back all changes after a failed stack update. For example, you might have a stack that's rolling back to an old database instance that was deleted outside of CloudFormation. Because CloudFormation doesn't know the database was deleted, it assumes that the database instance still exists and attempts to roll back to it, causing the update rollback to fail", + "For a specified stack that's in the UPDATE_ROLLBACK_FAILED state, continues rolling it back to the UPDATE_ROLLBACK_COMPLETE state. Depending on the cause of the failure, you can manually fix the error and continue the rollback. By continuing the rollback, you can return your stack to a working state (the UPDATE_ROLLBACK_COMPLETE state), and then try to update the stack again. A stack goes into the UPDATE_ROLLBACK_FAILED state when CloudFormation can't roll back all changes after a failed stack update. For example, you might have a stack that's rolling back to an old database instance that was deleted outside of CloudFormation. Because CloudFormation doesn't know the database was deleted, it assumes that the database instance still exists and attempts to roll back to it, causing the update rollback to fail", options: [ { name: "--stack-name", @@ -714,7 +714,7 @@ const completionSpec: Fig.Spec = { { name: "--role-arn", description: - "The Amazon Resource Name (ARN) of an Identity and Access Management (IAM) role that CloudFormation assumes to roll back the stack. CloudFormation uses the role's credentials to make calls on your behalf. CloudFormation always uses this role for all future operations on the stack. Provided that users have permission to operate on the stack, CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least permission. If you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role is available, CloudFormation uses a temporary session that's generated from your user credentials", + "The Amazon Resource Name (ARN) of an IAM role that CloudFormation assumes to roll back the stack. CloudFormation uses the role's credentials to make calls on your behalf. CloudFormation always uses this role for all future operations on the stack. Provided that users have permission to operate on the stack, CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least permission. If you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role is available, CloudFormation uses a temporary session that's generated from your user credentials", args: { name: "string", generators: generators.listRoleArns, @@ -723,7 +723,7 @@ const completionSpec: Fig.Spec = { { name: "--resources-to-skip", description: - "A list of the logical IDs of the resources that CloudFormation skips during the continue update rollback operation. You can specify only resources that are in the UPDATE_FAILED state because a rollback failed. You can't specify resources that are in the UPDATE_FAILED state for other reasons, for example, because an update was canceled. To check why a resource update failed, use the DescribeStackResources action, and view the resource status reason. 
Specify this property to skip rolling back resources that CloudFormation can't successfully roll back. We recommend that you troubleshoot resources before skipping them. CloudFormation sets the status of the specified resources to UPDATE_COMPLETE and continues to roll back the stack. After the rollback is complete, the state of the skipped resources will be inconsistent with the state of the resources in the stack template. Before performing another stack update, you must update the stack or resources to be consistent with each other. If you don't, subsequent stack updates might fail, and the stack will become unrecoverable. Specify the minimum number of resources required to successfully roll back your stack. For example, a failed resource update might cause dependent resources to fail. In this case, it might not be necessary to skip the dependent resources. To skip resources that are part of nested stacks, use the following format: NestedStackName.ResourceLogicalID. If you want to specify the logical ID of a stack resource (Type: AWS::CloudFormation::Stack) in the ResourcesToSkip list, then its corresponding embedded stack must be in one of the following states: DELETE_IN_PROGRESS, DELETE_COMPLETE, or DELETE_FAILED. Don't confuse a child stack's name with its corresponding logical ID defined in the parent stack. For an example of a continue update rollback operation with nested stacks, see Using ResourcesToSkip to recover a nested stacks hierarchy", + "A list of the logical IDs of the resources that CloudFormation skips during the continue update rollback operation. You can specify only resources that are in the UPDATE_FAILED state because a rollback failed. You can't specify resources that are in the UPDATE_FAILED state for other reasons, for example, because an update was canceled. To check why a resource update failed, use the DescribeStackResources action, and view the resource status reason. Specify this property to skip rolling back resources that CloudFormation can't successfully roll back. We recommend that you troubleshoot resources before skipping them. CloudFormation sets the status of the specified resources to UPDATE_COMPLETE and continues to roll back the stack. After the rollback is complete, the state of the skipped resources will be inconsistent with the state of the resources in the stack template. Before performing another stack update, you must update the stack or resources to be consistent with each other. If you don't, subsequent stack updates might fail, and the stack will become unrecoverable. Specify the minimum number of resources required to successfully roll back your stack. For example, a failed resource update might cause dependent resources to fail. In this case, it might not be necessary to skip the dependent resources. To skip resources that are part of nested stacks, use the following format: NestedStackName.ResourceLogicalID. If you want to specify the logical ID of a stack resource (Type: AWS::CloudFormation::Stack) in the ResourcesToSkip list, then its corresponding embedded stack must be in one of the following states: DELETE_IN_PROGRESS, DELETE_COMPLETE, or DELETE_FAILED. Don't confuse a child stack's name with its corresponding logical ID defined in the parent stack. 
For an example of a continue update rollback operation with nested stacks, see Continue rolling back from failed nested stack updates", args: { name: "list", isVariadic: true, @@ -811,7 +811,7 @@ const completionSpec: Fig.Spec = { { name: "--capabilities", description: - "In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to create the stack. CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities. The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability. If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error. If your stack template contains these resources, we suggest that you review all permissions associated with them and edit their permissions if necessary. AWS::IAM::AccessKey AWS::IAM::Group AWS::IAM::InstanceProfile AWS::IAM::Policy AWS::IAM::Role AWS::IAM::User AWS::IAM::UserToGroupAddition For more information, see Acknowledging IAM resources in CloudFormation templates. CAPABILITY_AUTO_EXPAND Some template contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation. This capacity doesn't apply to creating change sets, and specifying it when creating change sets has no effect. If you want to create a stack from a stack template that contains macros and nested stacks, you must create or update the stack directly from the template using the CreateStack or UpdateStack action, and specifying this capability. For more information about macros, see Using CloudFormation macros to perform custom processing on templates. Only one of the Capabilities and ResourceType parameters can be specified", + "In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to create the stack. CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new IAM users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities. The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability. If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. 
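A small illustration, with made-up logical IDs, of the two --resources-to-skip formats the continue-update-rollback description above calls out (resources in the parent stack versus resources inside a nested stack):

// Hypothetical logical IDs showing the accepted formats for --resources-to-skip.
const resourcesToSkipExample: string[] = [
  "MyFailedInstance",          // resource in the parent stack
  "ChildStack.MyFailedTable",  // NestedStackName.ResourceLogicalID for a nested stack resource
];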
If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error. If your stack template contains these resources, we suggest that you review all permissions associated with them and edit their permissions if necessary. AWS::IAM::AccessKey AWS::IAM::Group AWS::IAM::InstanceProfile AWS::IAM::Policy AWS::IAM::Role AWS::IAM::User AWS::IAM::UserToGroupAddition For more information, see Acknowledging IAM resources in CloudFormation templates. CAPABILITY_AUTO_EXPAND Some template contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation. This capacity doesn't apply to creating change sets, and specifying it when creating change sets has no effect. If you want to create a stack from a stack template that contains macros and nested stacks, you must create or update the stack directly from the template using the CreateStack or UpdateStack action, and specifying this capability. For more information about macros, see Perform custom processing on CloudFormation templates with template macros. Only one of the Capabilities and ResourceType parameters can be specified", args: { name: "list", isVariadic: true, @@ -825,7 +825,7 @@ const completionSpec: Fig.Spec = { { name: "--resource-types", description: - "The template resource types that you have permissions to work with if you execute this change set, such as AWS::EC2::Instance, AWS::EC2::*, or Custom::MyCustomInstance. If the list of resource types doesn't include a resource type that you're updating, the stack update fails. By default, CloudFormation grants permissions to all resource types. Identity and Access Management (IAM) uses this parameter for condition keys in IAM policies for CloudFormation. For more information, see Controlling access with Identity and Access Management in the CloudFormation User Guide. Only one of the Capabilities and ResourceType parameters can be specified", + "The template resource types that you have permissions to work with if you execute this change set, such as AWS::EC2::Instance, AWS::EC2::*, or Custom::MyCustomInstance. If the list of resource types doesn't include a resource type that you're updating, the stack update fails. By default, CloudFormation grants permissions to all resource types. IAM uses this parameter for condition keys in IAM policies for CloudFormation. For more information, see Control access with Identity and Access Management in the CloudFormation User Guide. Only one of the Capabilities and ResourceType parameters can be specified", args: { name: "list", isVariadic: true, @@ -834,7 +834,7 @@ const completionSpec: Fig.Spec = { { name: "--role-arn", description: - "The Amazon Resource Name (ARN) of an Identity and Access Management (IAM) role that CloudFormation assumes when executing the change set. CloudFormation uses the role's credentials to make calls on your behalf. 
CloudFormation uses this role for all future operations on the stack. Provided that users have permission to operate on the stack, CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least permission. If you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role is available, CloudFormation uses a temporary session that is generated from your user credentials", + "The Amazon Resource Name (ARN) of an IAM role that CloudFormation assumes when executing the change set. CloudFormation uses the role's credentials to make calls on your behalf. CloudFormation uses this role for all future operations on the stack. Provided that users have permission to operate on the stack, CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least permission. If you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role is available, CloudFormation uses a temporary session that is generated from your user credentials", args: { name: "string", generators: generators.listRoleArns, @@ -851,7 +851,7 @@ const completionSpec: Fig.Spec = { { name: "--notification-arns", description: - "The Amazon Resource Names (ARNs) of Amazon Simple Notification Service (Amazon SNS) topics that CloudFormation associates with the stack. To remove all associated notification topics, specify an empty list", + "The Amazon Resource Names (ARNs) of Amazon SNS topics that CloudFormation associates with the stack. To remove all associated notification topics, specify an empty list", args: { name: "list", generators: generators.listSNSTopics, @@ -928,12 +928,12 @@ const completionSpec: Fig.Spec = { { name: "--import-existing-resources", description: - "Indicates if the change set imports resources that already exist. This parameter can only import resources that have custom names in templates. For more information, see name type in the CloudFormation User Guide. To import resources that do not accept custom names, such as EC2 instances, use the resource import feature instead. For more information, see Bringing existing resources into CloudFormation management in the CloudFormation User Guide", + "Indicates if the change set imports resources that already exist. This parameter can only import resources that have custom names in templates. For more information, see name type in the CloudFormation User Guide. To import resources that do not accept custom names, such as EC2 instances, use the resource import feature instead. For more information, see Import Amazon Web Services resources into a CloudFormation stack with a resource import in the CloudFormation User Guide", }, { name: "--no-import-existing-resources", description: - "Indicates if the change set imports resources that already exist. This parameter can only import resources that have custom names in templates. For more information, see name type in the CloudFormation User Guide. To import resources that do not accept custom names, such as EC2 instances, use the resource import feature instead. For more information, see Bringing existing resources into CloudFormation management in the CloudFormation User Guide", + "Indicates if the change set imports resources that already exist. This parameter can only import resources that have custom names in templates. For more information, see name type in the CloudFormation User Guide. 
To import resources that do not accept custom names, such as EC2 instances, use the resource import feature instead. For more information, see Import Amazon Web Services resources into a CloudFormation stack with a resource import in the CloudFormation User Guide", }, { name: "--cli-input-json", @@ -1013,7 +1013,7 @@ const completionSpec: Fig.Spec = { { name: "create-stack", description: - "Creates a stack as specified in the template. After the call completes successfully, the stack creation starts. You can check the status of the stack through the DescribeStacks operation", + "Creates a stack as specified in the template. After the call completes successfully, the stack creation starts. You can check the status of the stack through the DescribeStacks operation. For more information about creating a stack and monitoring stack progress, see Managing Amazon Web Services resources as a single unit with CloudFormation stacks in the CloudFormation User Guide", options: [ { name: "--stack-name", @@ -1026,7 +1026,7 @@ const completionSpec: Fig.Spec = { { name: "--template-body", description: - "Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information, go to Template anatomy in the CloudFormation User Guide. Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both", + "Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both", args: { name: "string", generators: generators.listFiles, @@ -1035,7 +1035,7 @@ const completionSpec: Fig.Spec = { { name: "--template-url", description: - "Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that's located in an Amazon S3 bucket or a Systems Manager document. For more information, go to the Template anatomy in the CloudFormation User Guide. The location for an Amazon S3 bucket must start with https://. Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both", + "Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that's located in an Amazon S3 bucket or a Systems Manager document. The location for an Amazon S3 bucket must start with https://. Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both", args: { name: "string", }, @@ -1078,7 +1078,7 @@ const completionSpec: Fig.Spec = { { name: "--notification-arns", description: - "The Amazon Simple Notification Service (Amazon SNS) topic ARNs to publish stack related events. You can find your Amazon SNS topic ARNs using the Amazon SNS console or your Command Line Interface (CLI)", + "The Amazon SNS topic ARNs to publish stack related events. You can find your Amazon SNS topic ARNs using the Amazon SNS console or your Command Line Interface (CLI)", args: { name: "list", isVariadic: true, @@ -1088,7 +1088,7 @@ const completionSpec: Fig.Spec = { { name: "--capabilities", description: - "In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to create the stack. CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. 
For those stacks, you must explicitly acknowledge this by specifying one of these capabilities. The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability. If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error. If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary. AWS::IAM::AccessKey AWS::IAM::Group AWS::IAM::InstanceProfile AWS::IAM::Policy AWS::IAM::Role AWS::IAM::User AWS::IAM::UserToGroupAddition For more information, see Acknowledging IAM Resources in CloudFormation Templates. CAPABILITY_AUTO_EXPAND Some template contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation. If you want to create a stack from a stack template that contains macros and nested stacks, you must create the stack directly from the template using this capability. You should only create stacks directly from a stack template that contains macros if you know what processing the macro performs. Each macro relies on an underlying Lambda service function for processing stack templates. Be aware that the Lambda function owner can update the function operation without CloudFormation being notified. For more information, see Using CloudFormation macros to perform custom processing on templates. Only one of the Capabilities and ResourceType parameters can be specified", + "In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to create the stack. CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new IAM users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities. The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability. If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error. If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary. AWS::IAM::AccessKey AWS::IAM::Group AWS::IAM::InstanceProfile AWS::IAM::Policy AWS::IAM::Role AWS::IAM::User AWS::IAM::UserToGroupAddition For more information, see Acknowledging IAM resources in CloudFormation templates. CAPABILITY_AUTO_EXPAND Some template contain macros. 
Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation. If you want to create a stack from a stack template that contains macros and nested stacks, you must create the stack directly from the template using this capability. You should only create stacks directly from a stack template that contains macros if you know what processing the macro performs. Each macro relies on an underlying Lambda service function for processing stack templates. Be aware that the Lambda function owner can update the function operation without CloudFormation being notified. For more information, see Perform custom processing on CloudFormation templates with template macros. Only one of the Capabilities and ResourceType parameters can be specified", args: { name: "list", isVariadic: true, @@ -1102,7 +1102,7 @@ const completionSpec: Fig.Spec = { { name: "--resource-types", description: - "The template resource types that you have permissions to work with for this create stack action, such as AWS::EC2::Instance, AWS::EC2::*, or Custom::MyCustomInstance. Use the following syntax to describe template resource types: AWS::* (for all Amazon Web Services resources), Custom::* (for all custom resources), Custom::logical_ID (for a specific custom resource), AWS::service_name::* (for all resources of a particular Amazon Web Services service), and AWS::service_name::resource_logical_ID (for a specific Amazon Web Services resource). If the list of resource types doesn't include a resource that you're creating, the stack creation fails. By default, CloudFormation grants permissions to all resource types. Identity and Access Management (IAM) uses this parameter for CloudFormation-specific condition keys in IAM policies. For more information, see Controlling Access with Identity and Access Management. Only one of the Capabilities and ResourceType parameters can be specified", + "The template resource types that you have permissions to work with for this create stack action, such as AWS::EC2::Instance, AWS::EC2::*, or Custom::MyCustomInstance. Use the following syntax to describe template resource types: AWS::* (for all Amazon Web Services resources), Custom::* (for all custom resources), Custom::logical_ID (for a specific custom resource), AWS::service_name::* (for all resources of a particular Amazon Web Services service), and AWS::service_name::resource_logical_ID (for a specific Amazon Web Services resource). If the list of resource types doesn't include a resource that you're creating, the stack creation fails. By default, CloudFormation grants permissions to all resource types. IAM uses this parameter for CloudFormation-specific condition keys in IAM policies. For more information, see Control access with Identity and Access Management. 
Only one of the Capabilities and ResourceType parameters can be specified", args: { name: "list", isVariadic: true, @@ -1111,7 +1111,7 @@ const completionSpec: Fig.Spec = { { name: "--role-arn", description: - "The Amazon Resource Name (ARN) of an Identity and Access Management (IAM) role that CloudFormation assumes to create the stack. CloudFormation uses the role's credentials to make calls on your behalf. CloudFormation always uses this role for all future operations on the stack. Provided that users have permission to operate on the stack, CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least privilege. If you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role is available, CloudFormation uses a temporary session that's generated from your user credentials", + "The Amazon Resource Name (ARN) of an IAM role that CloudFormation assumes to create the stack. CloudFormation uses the role's credentials to make calls on your behalf. CloudFormation always uses this role for all future operations on the stack. Provided that users have permission to operate on the stack, CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least privilege. If you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role is available, CloudFormation uses a temporary session that's generated from your user credentials", args: { name: "string", generators: generators.listRoleArns, @@ -1129,7 +1129,7 @@ const completionSpec: Fig.Spec = { { name: "--stack-policy-body", description: - "Structure containing the stack policy body. For more information, go to Prevent Updates to Stack Resources in the CloudFormation User Guide. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both", + "Structure containing the stack policy body. For more information, see Prevent updates to stack resources in the CloudFormation User Guide. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both", args: { name: "string", generators: generators.listFiles, @@ -1163,12 +1163,12 @@ const completionSpec: Fig.Spec = { { name: "--enable-termination-protection", description: - "Whether to enable termination protection on the specified stack. If a user attempts to delete a stack with termination protection enabled, the operation fails and the stack remains unchanged. For more information, see Protecting a Stack From Being Deleted in the CloudFormation User Guide. Termination protection is deactivated on stacks by default. For nested stacks, termination protection is set on the root stack and can't be changed directly on the nested stack", + "Whether to enable termination protection on the specified stack. If a user attempts to delete a stack with termination protection enabled, the operation fails and the stack remains unchanged. For more information, see Protect CloudFormation stacks from being deleted in the CloudFormation User Guide. Termination protection is deactivated on stacks by default. For nested stacks, termination protection is set on the root stack and can't be changed directly on the nested stack", }, { name: "--no-enable-termination-protection", description: - "Whether to enable termination protection on the specified stack. 
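To make the --resource-types syntax above concrete, a sketch with placeholder names covering each pattern the description lists:

// Placeholder values for --resource-types, one per pattern named in the description.
const resourceTypePatterns: string[] = [
  "AWS::*",                    // all Amazon Web Services resources
  "Custom::*",                 // all custom resources
  "Custom::MyCustomInstance",  // a specific custom resource
  "AWS::EC2::*",               // all resources of one service
  "AWS::EC2::Instance",        // a specific resource type
];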
If a user attempts to delete a stack with termination protection enabled, the operation fails and the stack remains unchanged. For more information, see Protecting a Stack From Being Deleted in the CloudFormation User Guide. Termination protection is deactivated on stacks by default. For nested stacks, termination protection is set on the root stack and can't be changed directly on the nested stack", + "Whether to enable termination protection on the specified stack. If a user attempts to delete a stack with termination protection enabled, the operation fails and the stack remains unchanged. For more information, see Protect CloudFormation stacks from being deleted in the CloudFormation User Guide. Termination protection is deactivated on stacks by default. For nested stacks, termination protection is set on the root stack and can't be changed directly on the nested stack", }, { name: "--retain-except-on-create", @@ -1319,7 +1319,7 @@ const completionSpec: Fig.Spec = { { name: "--template-body", description: - "The structure that contains the template body, with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information, see Template Anatomy in the CloudFormation User Guide. Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both", + "The structure that contains the template body, with a minimum length of 1 byte and a maximum length of 51,200 bytes. Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both", args: { name: "string", generators: generators.listFiles, @@ -1328,7 +1328,7 @@ const completionSpec: Fig.Spec = { { name: "--template-url", description: - "The location of the file that contains the template body. The URL must point to a template (maximum size: 460,800 bytes) that's located in an Amazon S3 bucket or a Systems Manager document. For more information, see Template Anatomy in the CloudFormation User Guide. Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both", + "The location of the file that contains the template body. The URL must point to a template (maximum size: 460,800 bytes) that's located in an Amazon S3 bucket or a Systems Manager document. Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both", args: { name: "string", }, @@ -1352,7 +1352,7 @@ const completionSpec: Fig.Spec = { { name: "--capabilities", description: - "In some cases, you must explicitly acknowledge that your stack set template contains certain capabilities in order for CloudFormation to create the stack set and related stack instances. CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stack sets, you must explicitly acknowledge this by specifying one of these capabilities. The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability. If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error. If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary. 
AWS::IAM::AccessKey AWS::IAM::Group AWS::IAM::InstanceProfile AWS::IAM::Policy AWS::IAM::Role AWS::IAM::User AWS::IAM::UserToGroupAddition For more information, see Acknowledging IAM Resources in CloudFormation Templates. CAPABILITY_AUTO_EXPAND Some templates reference macros. If your stack set template references one or more macros, you must create the stack set directly from the processed template, without first reviewing the resulting changes in a change set. To create the stack set directly, you must acknowledge this capability. For more information, see Using CloudFormation Macros to Perform Custom Processing on Templates. Stack sets with service-managed permissions don't currently support the use of macros in templates. (This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.) Even if you specify this capability for a stack set with service-managed permissions, if you reference a macro in your template the stack set operation will fail", + "In some cases, you must explicitly acknowledge that your stack set template contains certain capabilities in order for CloudFormation to create the stack set and related stack instances. CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new IAM users. For those stack sets, you must explicitly acknowledge this by specifying one of these capabilities. The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability. If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error. If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary. AWS::IAM::AccessKey AWS::IAM::Group AWS::IAM::InstanceProfile AWS::IAM::Policy AWS::IAM::Role AWS::IAM::User AWS::IAM::UserToGroupAddition For more information, see Acknowledging IAM resources in CloudFormation templates. CAPABILITY_AUTO_EXPAND Some templates reference macros. If your stack set template references one or more macros, you must create the stack set directly from the processed template, without first reviewing the resulting changes in a change set. To create the stack set directly, you must acknowledge this capability. For more information, see Using CloudFormation Macros to Perform Custom Processing on Templates. Stack sets with service-managed permissions don't currently support the use of macros in templates. (This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.) Even if you specify this capability for a stack set with service-managed permissions, if you reference a macro in your template the stack set operation will fail", args: { name: "list", isVariadic: true, @@ -1601,7 +1601,7 @@ const completionSpec: Fig.Spec = { { name: "delete-stack", description: - "Deletes a specified stack. Once the call completes successfully, stack deletion starts. Deleted stacks don't show up in the DescribeStacks operation if the deletion has been completed successfully", + "Deletes a specified stack. Once the call completes successfully, stack deletion starts. 
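The capability names repeated in the create-change-set, create-stack, and create-stack-set descriptions above form a small fixed set; a sketch of how they could be offered as static suggestions for the --capabilities argument (the shipped spec may or may not do this):

// Hypothetical static suggestions for --capabilities; the values come straight from the descriptions above.
const capabilitySuggestions: Fig.Suggestion[] = [
  { name: "CAPABILITY_IAM" },
  { name: "CAPABILITY_NAMED_IAM" },
  { name: "CAPABILITY_AUTO_EXPAND" },
];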
Deleted stacks don't show up in the DescribeStacks operation if the deletion has been completed successfully. For more information about deleting a stack, see Delete a stack from the CloudFormation console in the CloudFormation User Guide", options: [ { name: "--stack-name", @@ -1625,7 +1625,7 @@ const completionSpec: Fig.Spec = { { name: "--role-arn", description: - "The Amazon Resource Name (ARN) of an Identity and Access Management (IAM) role that CloudFormation assumes to delete the stack. CloudFormation uses the role's credentials to make calls on your behalf. If you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role is available, CloudFormation uses a temporary session that's generated from your user credentials", + "The Amazon Resource Name (ARN) of an IAM role that CloudFormation assumes to delete the stack. CloudFormation uses the role's credentials to make calls on your behalf. If you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role is available, CloudFormation uses a temporary session that's generated from your user credentials", args: { name: "string", generators: generators.listRoleArns, @@ -1873,7 +1873,7 @@ const completionSpec: Fig.Spec = { { name: "describe-account-limits", description: - "Retrieves your account's CloudFormation limits, such as the maximum number of stacks that you can create in your account. For more information about account limits, see CloudFormation Quotas in the CloudFormation User Guide", + "Retrieves your account's CloudFormation limits, such as the maximum number of stacks that you can create in your account. For more information about account limits, see Understand CloudFormation quotas in the CloudFormation User Guide", options: [ { name: "--next-token", @@ -1922,7 +1922,7 @@ const completionSpec: Fig.Spec = { { name: "describe-change-set", description: - "Returns the inputs for the change set and a list of changes that CloudFormation will make if you execute the change set. For more information, see Updating Stacks Using Change Sets in the CloudFormation User Guide", + "Returns the inputs for the change set and a list of changes that CloudFormation will make if you execute the change set. For more information, see Update CloudFormation stacks using change sets in the CloudFormation User Guide", options: [ { name: "--change-set-name", @@ -2119,7 +2119,7 @@ const completionSpec: Fig.Spec = { { name: "describe-publisher", description: - "Returns information about a CloudFormation extension publisher. If you don't supply a PublisherId, and you have registered as an extension publisher, DescribePublisher returns information about your own publisher account. For more information about registering as a publisher, see: RegisterPublisher Publishing extensions to make them available for public use in the CloudFormation CLI User Guide", + "Returns information about a CloudFormation extension publisher. If you don't supply a PublisherId, and you have registered as an extension publisher, DescribePublisher returns information about your own publisher account. 
For more information about registering as a publisher, see: RegisterPublisher Publishing extensions to make them available for public use in the CloudFormation Command Line Interface (CLI) User Guide", options: [ { name: "--publisher-id", @@ -2181,7 +2181,7 @@ const completionSpec: Fig.Spec = { { name: "describe-stack-drift-detection-status", description: - "Returns information about a stack drift detection operation. A stack drift detection operation detects whether a stack's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. A stack is considered to have drifted if one or more of its resources have drifted. For more information about stack and resource drift, see Detecting Unregulated Configuration Changes to Stacks and Resources. Use DetectStackDrift to initiate a stack drift detection operation. DetectStackDrift returns a StackDriftDetectionId you can use to monitor the progress of the operation using DescribeStackDriftDetectionStatus. Once the drift detection operation has completed, use DescribeStackResourceDrifts to return drift information about the stack and its resources", + "Returns information about a stack drift detection operation. A stack drift detection operation detects whether a stack's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. A stack is considered to have drifted if one or more of its resources have drifted. For more information about stack and resource drift, see Detect unmanaged configuration changes to stacks and resources with drift detection. Use DetectStackDrift to initiate a stack drift detection operation. DetectStackDrift returns a StackDriftDetectionId you can use to monitor the progress of the operation using DescribeStackDriftDetectionStatus. Once the drift detection operation has completed, use DescribeStackResourceDrifts to return drift information about the stack and its resources", options: [ { name: "--stack-drift-detection-id", @@ -2214,7 +2214,7 @@ const completionSpec: Fig.Spec = { { name: "describe-stack-events", description: - "Returns all stack related events for a specified stack in reverse chronological order. For more information about a stack's event history, see CloudFormation stack creation events in the CloudFormation User Guide. You can list events for stacks that have failed to create or have been deleted by specifying the unique stack identifier (stack ID)", + "Returns all stack related events for a specified stack in reverse chronological order. For more information about a stack's event history, see Understand CloudFormation stack creation events in the CloudFormation User Guide. You can list events for stacks that have failed to create or have been deleted by specifying the unique stack identifier (stack ID)", options: [ { name: "--stack-name", @@ -2376,7 +2376,7 @@ const completionSpec: Fig.Spec = { { name: "describe-stack-resource-drifts", description: - "Returns drift information for the resources that have been checked for drift in the specified stack. This includes actual and expected configuration values for resources where CloudFormation detects configuration drift. For a given stack, there will be one StackResourceDrift for each stack resource that has been checked for drift. Resources that haven't yet been checked for drift aren't included. 
Resources that don't currently support drift detection aren't checked, and so not included. For a list of resources that support drift detection, see Resources that Support Drift Detection. Use DetectStackResourceDrift to detect drift on individual resources, or DetectStackDrift to detect drift on all supported resources for a given stack", + "Returns drift information for the resources that have been checked for drift in the specified stack. This includes actual and expected configuration values for resources where CloudFormation detects configuration drift. For a given stack, there will be one StackResourceDrift for each stack resource that has been checked for drift. Resources that haven't yet been checked for drift aren't included. Resources that don't currently support drift detection aren't checked, and so not included. For a list of resources that support drift detection, see Resource type support for imports and drift detection. Use DetectStackResourceDrift to detect drift on individual resources, or DetectStackDrift to detect drift on all supported resources for a given stack", options: [ { name: "--stack-name", @@ -2436,7 +2436,7 @@ const completionSpec: Fig.Spec = { { name: "describe-stack-resources", description: - "Returns Amazon Web Services resource descriptions for running and deleted stacks. If StackName is specified, all the associated resources that are part of the stack are returned. If PhysicalResourceId is specified, the associated resources of the stack that the resource belongs to are returned. Only the first 100 resources will be returned. If your stack has more resources than this, you should use ListStackResources instead. For deleted stacks, DescribeStackResources returns resource information for up to 90 days after the stack has been deleted. You must specify either StackName or PhysicalResourceId, but not both. In addition, you can specify LogicalResourceId to filter the returned result. For more information about resources, the LogicalResourceId and PhysicalResourceId, go to the CloudFormation User Guide. A ValidationError is returned if you specify both StackName and PhysicalResourceId in the same request", + "Returns Amazon Web Services resource descriptions for running and deleted stacks. If StackName is specified, all the associated resources that are part of the stack are returned. If PhysicalResourceId is specified, the associated resources of the stack that the resource belongs to are returned. Only the first 100 resources will be returned. If your stack has more resources than this, you should use ListStackResources instead. For deleted stacks, DescribeStackResources returns resource information for up to 90 days after the stack has been deleted. You must specify either StackName or PhysicalResourceId, but not both. In addition, you can specify LogicalResourceId to filter the returned result. For more information about resources, the LogicalResourceId and PhysicalResourceId, see the CloudFormation User Guide. A ValidationError is returned if you specify both StackName and PhysicalResourceId in the same request", options: [ { name: "--stack-name", @@ -2580,7 +2580,7 @@ const completionSpec: Fig.Spec = { { name: "describe-stacks", description: - "Returns the description for the specified stack; if no stack name was specified, then it returns the description for all the stacks created. For more information about a stack's event history, see CloudFormation stack creation events in the CloudFormation User Guide. 
If the stack doesn't exist, a ValidationError is returned", + "Returns the description for the specified stack; if no stack name was specified, then it returns the description for all the stacks created. For more information about a stack's event history, see Understand CloudFormation stack creation events in the CloudFormation User Guide. If the stack doesn't exist, a ValidationError is returned", options: [ { name: "--stack-name", @@ -2745,7 +2745,7 @@ const completionSpec: Fig.Spec = { { name: "detect-stack-drift", description: - "Detects whether a stack's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For each resource in the stack that supports drift detection, CloudFormation compares the actual configuration of the resource with its expected template configuration. Only resource properties explicitly defined in the stack template are checked for drift. A stack is considered to have drifted if one or more of its resources differ from their expected template configurations. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources. Use DetectStackDrift to detect drift on all supported resources for a given stack, or DetectStackResourceDrift to detect drift on individual resources. For a list of stack resources that currently support drift detection, see Resources that Support Drift Detection. DetectStackDrift can take up to several minutes, depending on the number of resources contained within the stack. Use DescribeStackDriftDetectionStatus to monitor the progress of a detect stack drift operation. Once the drift detection operation has completed, use DescribeStackResourceDrifts to return drift information about the stack and its resources. When detecting drift on a stack, CloudFormation doesn't detect drift on any nested stacks belonging to that stack. Perform DetectStackDrift directly on the nested stack itself", + "Detects whether a stack's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For each resource in the stack that supports drift detection, CloudFormation compares the actual configuration of the resource with its expected template configuration. Only resource properties explicitly defined in the stack template are checked for drift. A stack is considered to have drifted if one or more of its resources differ from their expected template configurations. For more information, see Detect unmanaged configuration changes to stacks and resources with drift detection. Use DetectStackDrift to detect drift on all supported resources for a given stack, or DetectStackResourceDrift to detect drift on individual resources. For a list of stack resources that currently support drift detection, see Resource type support for imports and drift detection. DetectStackDrift can take up to several minutes, depending on the number of resources contained within the stack. Use DescribeStackDriftDetectionStatus to monitor the progress of a detect stack drift operation. Once the drift detection operation has completed, use DescribeStackResourceDrifts to return drift information about the stack and its resources. When detecting drift on a stack, CloudFormation doesn't detect drift on any nested stacks belonging to that stack. 
Perform DetectStackDrift directly on the nested stack itself", options: [ { name: "--stack-name", @@ -2788,7 +2788,7 @@ const completionSpec: Fig.Spec = { { name: "detect-stack-resource-drift", description: - "Returns information about whether a resource's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. This information includes actual and expected property values for resources in which CloudFormation detects drift. Only resource properties explicitly defined in the stack template are checked for drift. For more information about stack and resource drift, see Detecting Unregulated Configuration Changes to Stacks and Resources. Use DetectStackResourceDrift to detect drift on individual resources, or DetectStackDrift to detect drift on all resources in a given stack that support drift detection. Resources that don't currently support drift detection can't be checked. For a list of resources that support drift detection, see Resources that Support Drift Detection", + "Returns information about whether a resource's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. This information includes actual and expected property values for resources in which CloudFormation detects drift. Only resource properties explicitly defined in the stack template are checked for drift. For more information about stack and resource drift, see Detect unmanaged configuration changes to stacks and resources with drift detection. Use DetectStackResourceDrift to detect drift on individual resources, or DetectStackDrift to detect drift on all resources in a given stack that support drift detection. Resources that don't currently support drift detection can't be checked. For a list of resources that support drift detection, see Resource type support for imports and drift detection", options: [ { name: "--stack-name", @@ -2893,7 +2893,7 @@ const completionSpec: Fig.Spec = { { name: "--template-body", description: - "Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. (For more information, go to Template Anatomy in the CloudFormation User Guide.) Conditional: You must pass TemplateBody or TemplateURL. If both are passed, only TemplateBody is used", + "Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. Conditional: You must pass TemplateBody or TemplateURL. If both are passed, only TemplateBody is used", args: { name: "string", generators: generators.listFiles, @@ -2902,7 +2902,7 @@ const completionSpec: Fig.Spec = { { name: "--template-url", description: - "Location of file containing the template body. The URL must point to a template that's located in an Amazon S3 bucket or a Systems Manager document. For more information, go to Template Anatomy in the CloudFormation User Guide. The location for an Amazon S3 bucket must start with https://. Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used", + "Location of file containing the template body. The URL must point to a template that's located in an Amazon S3 bucket or a Systems Manager document. The location for an Amazon S3 bucket must start with https://. Conditional: You must pass TemplateURL or TemplateBody. 
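// Editor's illustration (not part of the generated spec diff): detect-stack-resource-drift,
// described above, checks a single resource rather than the whole stack. A minimal hedged
// sketch; the stack name and logical resource ID are assumed example values.
import {
  CloudFormationClient,
  DetectStackResourceDriftCommand,
} from "@aws-sdk/client-cloudformation";

export async function detectResourceDrift() {
  const cfn = new CloudFormationClient({});
  const { StackResourceDrift } = await cfn.send(
    new DetectStackResourceDriftCommand({
      StackName: "my-stack", // assumed example stack
      LogicalResourceId: "AppSecurityGroup", // assumed example resource
    })
  );
  // StackResourceDriftStatus is IN_SYNC, MODIFIED, DELETED, or NOT_CHECKED.
  return StackResourceDrift?.StackResourceDriftStatus;
}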
If both are passed, only TemplateBody is used", args: { name: "string", }, @@ -3141,7 +3141,7 @@ const completionSpec: Fig.Spec = { { name: "--template-body", description: - "Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information about templates, see Template anatomy in the CloudFormation User Guide. Conditional: You must specify only one of the following parameters: StackName, StackSetName, TemplateBody, or TemplateURL", + "Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. Conditional: You must specify only one of the following parameters: StackName, StackSetName, TemplateBody, or TemplateURL", args: { name: "string", generators: generators.listFiles, @@ -3150,7 +3150,7 @@ const completionSpec: Fig.Spec = { { name: "--template-url", description: - "Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that's located in an Amazon S3 bucket or a Systems Manager document. For more information about templates, see Template anatomy in the CloudFormation User Guide. The location for an Amazon S3 bucket must start with https://. Conditional: You must specify only one of the following parameters: StackName, StackSetName, TemplateBody, or TemplateURL", + "Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that's located in an Amazon S3 bucket or a Systems Manager document. The location for an Amazon S3 bucket must start with https://. Conditional: You must specify only one of the following parameters: StackName, StackSetName, TemplateBody, or TemplateURL", args: { name: "string", }, @@ -3350,7 +3350,7 @@ const completionSpec: Fig.Spec = { { name: "list-exports", description: - "Lists all exported output values in the account and Region in which you call this action. Use this action to see the exported output values that you can import into other stacks. To import values, use the Fn::ImportValue function. For more information, see CloudFormation export stack output values", + "Lists all exported output values in the account and Region in which you call this action. Use this action to see the exported output values that you can import into other stacks. To import values, use the Fn::ImportValue function. For more information, see Get exported outputs from a deployed CloudFormation stack", options: [ { name: "--next-token", @@ -4691,7 +4691,7 @@ const completionSpec: Fig.Spec = { { name: "publish-type", description: - "Publishes the specified extension to the CloudFormation registry as a public extension in this Region. Public extensions are available for use by all CloudFormation users. For more information about publishing extensions, see Publishing extensions to make them available for public use in the CloudFormation CLI User Guide. To publish an extension, you must be registered as a publisher with CloudFormation. For more information, see RegisterPublisher", + "Publishes the specified extension to the CloudFormation registry as a public extension in this Region. Public extensions are available for use by all CloudFormation users. For more information about publishing extensions, see Publishing extensions to make them available for public use in the CloudFormation Command Line Interface (CLI) User Guide. To publish an extension, you must be registered as a publisher with CloudFormation. 
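// Editor's illustration (not part of the generated spec diff): several of the hunks above
// describe the TemplateBody / TemplateURL conditional (inline body up to 51,200 bytes, or
// an https:// S3 location up to 460,800 bytes, with TemplateBody winning if both are set).
// A hedged sketch of both forms using get-template-summary; the file path and bucket URL
// are assumptions.
import { readFile } from "node:fs/promises";
import {
  CloudFormationClient,
  GetTemplateSummaryCommand,
} from "@aws-sdk/client-cloudformation";

export async function summarizeTemplates() {
  const cfn = new CloudFormationClient({});

  // Inline template body read from a local file.
  const body = await readFile("template.yaml", "utf8");
  const fromBody = await cfn.send(new GetTemplateSummaryCommand({ TemplateBody: body }));

  // Template stored in S3; the location must start with https://.
  const fromUrl = await cfn.send(
    new GetTemplateSummaryCommand({
      TemplateURL: "https://my-bucket.s3.amazonaws.com/template.yaml",
    })
  );
  return { fromBody: fromBody.Parameters, fromUrl: fromUrl.Parameters };
}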
For more information, see RegisterPublisher", options: [ { name: "--type", @@ -4839,7 +4839,7 @@ const completionSpec: Fig.Spec = { { name: "register-publisher", description: - "Registers your account as a publisher of public extensions in the CloudFormation registry. Public extensions are available for use by all CloudFormation users. This publisher ID applies to your account in all Amazon Web Services Regions. For information about requirements for registering as a public extension publisher, see Registering your account to publish CloudFormation extensions in the CloudFormation CLI User Guide", + "Registers your account as a publisher of public extensions in the CloudFormation registry. Public extensions are available for use by all CloudFormation users. This publisher ID applies to your account in all Amazon Web Services Regions. For information about requirements for registering as a public extension publisher, see Prerequisite: Registering your account to publish CloudFormation extensions in the CloudFormation Command Line Interface (CLI) User Guide", options: [ { name: "--accept-terms-and-conditions", @@ -4854,7 +4854,7 @@ const completionSpec: Fig.Spec = { { name: "--connection-arn", description: - "If you are using a Bitbucket or GitHub account for identity verification, the Amazon Resource Name (ARN) for your connection to that account. For more information, see Registering your account to publish CloudFormation extensions in the CloudFormation CLI User Guide", + "If you are using a Bitbucket or GitHub account for identity verification, the Amazon Resource Name (ARN) for your connection to that account. For more information, see Prerequisite: Registering your account to publish CloudFormation extensions in the CloudFormation Command Line Interface (CLI) User Guide", args: { name: "string", }, @@ -4881,7 +4881,7 @@ const completionSpec: Fig.Spec = { { name: "register-type", description: - "Registers an extension with the CloudFormation service. Registering an extension makes it available for use in CloudFormation templates in your Amazon Web Services account, and includes: Validating the extension schema. Determining which handlers, if any, have been specified for the extension. Making the extension available for use in your account. For more information about how to develop extensions and ready them for registration, see Creating Resource Providers in the CloudFormation CLI User Guide. You can have a maximum of 50 resource extension versions registered at a time. This maximum is per account and per Region. Use DeregisterType to deregister specific extension versions if necessary. Once you have initiated a registration request using RegisterType, you can use DescribeTypeRegistration to monitor the progress of the registration request. Once you have registered a private extension in your account and Region, use SetTypeConfiguration to specify configuration properties for the extension. For more information, see Configuring extensions at the account level in the CloudFormation User Guide", + "Registers an extension with the CloudFormation service. Registering an extension makes it available for use in CloudFormation templates in your Amazon Web Services account, and includes: Validating the extension schema. Determining which handlers, if any, have been specified for the extension. Making the extension available for use in your account. 
For more information about how to develop extensions and ready them for registration, see Creating resource types using the CloudFormation CLI in the CloudFormation Command Line Interface (CLI) User Guide. You can have a maximum of 50 resource extension versions registered at a time. This maximum is per account and per Region. Use DeregisterType to deregister specific extension versions if necessary. Once you have initiated a registration request using RegisterType, you can use DescribeTypeRegistration to monitor the progress of the registration request. Once you have registered a private extension in your account and Region, use SetTypeConfiguration to specify configuration properties for the extension. For more information, see Edit configuration data for extensions in your account in the CloudFormation User Guide", options: [ { name: "--type", @@ -4903,7 +4903,7 @@ const completionSpec: Fig.Spec = { { name: "--schema-handler-package", description: - "A URL to the S3 bucket containing the extension project package that contains the necessary files for the extension you want to register. For information about generating a schema handler package for the extension you want to register, see submit in the CloudFormation CLI User Guide. The user registering the extension must be able to access the package in the S3 bucket. That's, the user needs to have GetObject permissions for the schema handler package. For more information, see Actions, Resources, and Condition Keys for Amazon S3 in the Identity and Access Management User Guide", + "A URL to the S3 bucket containing the extension project package that contains the necessary files for the extension you want to register. For information about generating a schema handler package for the extension you want to register, see submit in the CloudFormation Command Line Interface (CLI) User Guide. The user registering the extension must be able to access the package in the S3 bucket. That's, the user needs to have GetObject permissions for the schema handler package. For more information, see Actions, Resources, and Condition Keys for Amazon S3 in the Identity and Access Management User Guide", args: { name: "string", generators: generators.listRemoteFilesGenerator, @@ -4969,7 +4969,7 @@ const completionSpec: Fig.Spec = { { name: "--role-arn", description: - "The Amazon Resource Name (ARN) of an Identity and Access Management role that CloudFormation assumes to rollback the stack", + "The Amazon Resource Name (ARN) of an IAM role that CloudFormation assumes to rollback the stack", args: { name: "string", }, @@ -5026,7 +5026,7 @@ const completionSpec: Fig.Spec = { { name: "--stack-policy-body", description: - "Structure containing the stack policy body. For more information, go to Prevent updates to stack resources in the CloudFormation User Guide. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both", + "Structure containing the stack policy body. For more information, see Prevent updates to stack resources in the CloudFormation User Guide. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both", args: { name: "string", generators: generators.listFiles, @@ -5063,7 +5063,7 @@ const completionSpec: Fig.Spec = { { name: "set-type-configuration", description: - "Specifies the configuration data for a registered CloudFormation extension, in the given account and Region. To view the current configuration data for an extension, refer to the ConfigurationSchema element of DescribeType. 
For more information, see Configuring extensions at the account level in the CloudFormation User Guide. It's strongly recommended that you use dynamic references to restrict sensitive configuration definitions, such as third-party credentials. For more details on dynamic references, see Using dynamic references to specify template values in the CloudFormation User Guide", + "Specifies the configuration data for a registered CloudFormation extension, in the given account and Region. To view the current configuration data for an extension, refer to the ConfigurationSchema element of DescribeType. For more information, see Edit configuration data for extensions in your account in the CloudFormation User Guide. It's strongly recommended that you use dynamic references to restrict sensitive configuration definitions, such as third-party credentials. For more details on dynamic references, see Specify values stored in other services using dynamic references in the CloudFormation User Guide", options: [ { name: "--type-arn", @@ -5076,7 +5076,7 @@ const completionSpec: Fig.Spec = { { name: "--configuration", description: - "The configuration data for the extension, in this account and Region. The configuration data must be formatted as JSON, and validate against the schema returned in the ConfigurationSchema response element of DescribeType. For more information, see Defining account-level configuration data for an extension in the CloudFormation CLI User Guide", + "The configuration data for the extension, in this account and Region. The configuration data must be formatted as JSON, and validate against the schema returned in the ConfigurationSchema response element of DescribeType. For more information, see Defining the account-level configuration of an extension in the CloudFormation Command Line Interface (CLI) User Guide", args: { name: "string", }, @@ -5333,7 +5333,7 @@ const completionSpec: Fig.Spec = { { name: "test-type", description: - "Tests a registered extension to make sure it meets all necessary requirements for being published in the CloudFormation registry. For resource types, this includes passing all contracts tests defined for the type. For modules, this includes determining if the module's model meets all necessary requirements. For more information, see Testing your public extension prior to publishing in the CloudFormation CLI User Guide. If you don't specify a version, CloudFormation uses the default version of the extension in your account and Region for testing. To perform testing, CloudFormation assumes the execution role specified when the type was registered. For more information, see RegisterType. Once you've initiated testing on an extension using TestType, you can pass the returned TypeVersionArn into DescribeType to monitor the current test status and test status description for the extension. An extension must have a test status of PASSED before it can be published. For more information, see Publishing extensions to make them available for public use in the CloudFormation CLI User Guide", + "Tests a registered extension to make sure it meets all necessary requirements for being published in the CloudFormation registry. For resource types, this includes passing all contracts tests defined for the type. For modules, this includes determining if the module's model meets all necessary requirements. For more information, see Testing your public extension before publishing in the CloudFormation Command Line Interface (CLI) User Guide. 
If you don't specify a version, CloudFormation uses the default version of the extension in your account and Region for testing. To perform testing, CloudFormation assumes the execution role specified when the type was registered. For more information, see RegisterType. Once you've initiated testing on an extension using TestType, you can pass the returned TypeVersionArn into DescribeType to monitor the current test status and test status description for the extension. An extension must have a test status of PASSED before it can be published. For more information, see Publishing extensions to make them available for public use in the CloudFormation Command Line Interface (CLI) User Guide", options: [ { name: "--arn", @@ -5370,7 +5370,7 @@ const completionSpec: Fig.Spec = { { name: "--log-delivery-bucket", description: - "The S3 bucket to which CloudFormation delivers the contract test execution logs. CloudFormation delivers the logs by the time contract testing has completed and the extension has been assigned a test type status of PASSED or FAILED. The user calling TestType must be able to access items in the specified S3 bucket. Specifically, the user needs the following permissions: GetObject PutObject For more information, see Actions, Resources, and Condition Keys for Amazon S3 in the Amazon Web Services Identity and Access Management User Guide", + "The S3 bucket to which CloudFormation delivers the contract test execution logs. CloudFormation delivers the logs by the time contract testing has completed and the extension has been assigned a test type status of PASSED or FAILED. The user calling TestType must be able to access items in the specified S3 bucket. Specifically, the user needs the following permissions: GetObject PutObject For more information, see Actions, Resources, and Condition Keys for Amazon S3 in the Identity and Access Management User Guide", args: { name: "string", }, @@ -5471,7 +5471,7 @@ const completionSpec: Fig.Spec = { { name: "update-stack", description: - "Updates a stack as specified in the template. After the call completes successfully, the stack update starts. You can check the status of the stack through the DescribeStacks action. To get a copy of the template for an existing stack, you can use the GetTemplate action. For more information about creating an update template, updating a stack, and monitoring the progress of the update, see Updating a Stack", + "Updates a stack as specified in the template. After the call completes successfully, the stack update starts. You can check the status of the stack through the DescribeStacks action. To get a copy of the template for an existing stack, you can use the GetTemplate action. For more information about updating a stack and monitoring the progress of the update, see Managing Amazon Web Services resources as a single unit with CloudFormation stacks in the CloudFormation User Guide", options: [ { name: "--stack-name", @@ -5484,7 +5484,7 @@ const completionSpec: Fig.Spec = { { name: "--template-body", description: - "Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. (For more information, go to Template Anatomy in the CloudFormation User Guide.) Conditional: You must specify only one of the following parameters: TemplateBody, TemplateURL, or set the UsePreviousTemplate to true", + "Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. 
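// Editor's illustration (not part of the generated spec diff): register-type,
// set-type-configuration, test-type and publish-type, described in the hunks above, form
// the extension lifecycle for the CloudFormation registry. A hedged sketch with
// @aws-sdk/client-cloudformation; the type name, package location and configuration JSON
// are assumed examples, and in practice you would wait for registration to complete and
// for a PASSED test status before publishing.
import {
  CloudFormationClient,
  RegisterTypeCommand,
  DescribeTypeRegistrationCommand,
  SetTypeConfigurationCommand,
  TestTypeCommand,
  PublishTypeCommand,
} from "@aws-sdk/client-cloudformation";

export async function registerTestAndPublish() {
  const cfn = new CloudFormationClient({});
  const typeName = "MyOrg::Example::Widget"; // assumed example type name

  // 1. Register the extension from a schema handler package.
  const { RegistrationToken } = await cfn.send(
    new RegisterTypeCommand({
      Type: "RESOURCE",
      TypeName: typeName,
      SchemaHandlerPackage: "s3://my-bucket/widget-handler.zip", // assumed location
    })
  );

  // 2. Monitor the asynchronous registration request.
  if (RegistrationToken) {
    await cfn.send(new DescribeTypeRegistrationCommand({ RegistrationToken }));
  }

  // 3. Supply account-level configuration, then test and publish the extension.
  await cfn.send(
    new SetTypeConfigurationCommand({
      Type: "RESOURCE",
      TypeName: typeName,
      Configuration: JSON.stringify({ Endpoint: "https://example.com" }), // assumed schema
    })
  );
  await cfn.send(new TestTypeCommand({ Type: "RESOURCE", TypeName: typeName }));
  await cfn.send(new PublishTypeCommand({ Type: "RESOURCE", TypeName: typeName }));
}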
Conditional: You must specify only one of the following parameters: TemplateBody, TemplateURL, or set the UsePreviousTemplate to true", args: { name: "string", generators: generators.listFiles, @@ -5493,7 +5493,7 @@ const completionSpec: Fig.Spec = { { name: "--template-url", description: - "Location of file containing the template body. The URL must point to a template that's located in an Amazon S3 bucket or a Systems Manager document. For more information, go to Template Anatomy in the CloudFormation User Guide. The location for an Amazon S3 bucket must start with https://. Conditional: You must specify only one of the following parameters: TemplateBody, TemplateURL, or set the UsePreviousTemplate to true", + "Location of file containing the template body. The URL must point to a template that's located in an Amazon S3 bucket or a Systems Manager document. The location for an Amazon S3 bucket must start with https://. Conditional: You must specify only one of the following parameters: TemplateBody, TemplateURL, or set the UsePreviousTemplate to true", args: { name: "string", }, @@ -5537,7 +5537,7 @@ const completionSpec: Fig.Spec = { { name: "--capabilities", description: - "In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to update the stack. CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities. The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability. If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error. If your stack template contains these resources, we suggest that you review all permissions associated with them and edit their permissions if necessary. AWS::IAM::AccessKey AWS::IAM::Group AWS::IAM::InstanceProfile AWS::IAM::Policy AWS::IAM::Role AWS::IAM::User AWS::IAM::UserToGroupAddition For more information, see Acknowledging IAM Resources in CloudFormation Templates. CAPABILITY_AUTO_EXPAND Some template contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually updating the stack. If your stack template contains one or more macros, and you choose to update a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation. If you want to update a stack from a stack template that contains macros and nested stacks, you must update the stack directly from the template using this capability. You should only update stacks directly from a stack template that contains macros if you know what processing the macro performs. Each macro relies on an underlying Lambda service function for processing stack templates. 
Be aware that the Lambda function owner can update the function operation without CloudFormation being notified. For more information, see Using CloudFormation Macros to Perform Custom Processing on Templates. Only one of the Capabilities and ResourceType parameters can be specified", + "In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to update the stack. CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include resources that can affect permissions in your Amazon Web Services account, for example, by creating new IAM users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities. The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability. If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error. If your stack template contains these resources, we suggest that you review all permissions associated with them and edit their permissions if necessary. AWS::IAM::AccessKey AWS::IAM::Group AWS::IAM::InstanceProfile AWS::IAM::Policy AWS::IAM::Role AWS::IAM::User AWS::IAM::UserToGroupAddition For more information, see Acknowledging IAM resources in CloudFormation templates. CAPABILITY_AUTO_EXPAND Some template contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually updating the stack. If your stack template contains one or more macros, and you choose to update a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation. If you want to update a stack from a stack template that contains macros and nested stacks, you must update the stack directly from the template using this capability. You should only update stacks directly from a stack template that contains macros if you know what processing the macro performs. Each macro relies on an underlying Lambda service function for processing stack templates. Be aware that the Lambda function owner can update the function operation without CloudFormation being notified. For more information, see Perform custom processing on CloudFormation templates with template macros. Only one of the Capabilities and ResourceType parameters can be specified", args: { name: "list", isVariadic: true, @@ -5551,7 +5551,7 @@ const completionSpec: Fig.Spec = { { name: "--resource-types", description: - "The template resource types that you have permissions to work with for this update stack action, such as AWS::EC2::Instance, AWS::EC2::*, or Custom::MyCustomInstance. If the list of resource types doesn't include a resource that you're updating, the stack update fails. By default, CloudFormation grants permissions to all resource types. Identity and Access Management (IAM) uses this parameter for CloudFormation-specific condition keys in IAM policies. 
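// Editor's illustration (not part of the generated spec diff): the --capabilities text
// above explains when CAPABILITY_IAM, CAPABILITY_NAMED_IAM and CAPABILITY_AUTO_EXPAND must
// be acknowledged on update-stack. A hedged sketch; the stack name, template URL and role
// ARN are assumed examples.
import { CloudFormationClient, UpdateStackCommand } from "@aws-sdk/client-cloudformation";

export async function updateStackWithCapabilities() {
  const cfn = new CloudFormationClient({});
  return cfn.send(
    new UpdateStackCommand({
      StackName: "my-stack",
      TemplateURL: "https://my-bucket.s3.amazonaws.com/template.yaml",
      // Acknowledge custom-named IAM resources and macro expansion in the template.
      Capabilities: ["CAPABILITY_NAMED_IAM", "CAPABILITY_AUTO_EXPAND"],
      // Optional service role; otherwise a session from the caller's credentials is used.
      RoleARN: "arn:aws:iam::123456789012:role/cfn-deploy-role",
    })
  );
}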
For more information, see Controlling Access with Identity and Access Management. Only one of the Capabilities and ResourceType parameters can be specified", + "The template resource types that you have permissions to work with for this update stack action, such as AWS::EC2::Instance, AWS::EC2::*, or Custom::MyCustomInstance. If the list of resource types doesn't include a resource that you're updating, the stack update fails. By default, CloudFormation grants permissions to all resource types. IAM uses this parameter for CloudFormation-specific condition keys in IAM policies. For more information, see Control access with Identity and Access Management. Only one of the Capabilities and ResourceType parameters can be specified", args: { name: "list", isVariadic: true, @@ -5560,7 +5560,7 @@ const completionSpec: Fig.Spec = { { name: "--role-arn", description: - "The Amazon Resource Name (ARN) of an Identity and Access Management (IAM) role that CloudFormation assumes to update the stack. CloudFormation uses the role's credentials to make calls on your behalf. CloudFormation always uses this role for all future operations on the stack. Provided that users have permission to operate on the stack, CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least privilege. If you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role is available, CloudFormation uses a temporary session that is generated from your user credentials", + "The Amazon Resource Name (ARN) of an IAM role that CloudFormation assumes to update the stack. CloudFormation uses the role's credentials to make calls on your behalf. CloudFormation always uses this role for all future operations on the stack. Provided that users have permission to operate on the stack, CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least privilege. If you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role is available, CloudFormation uses a temporary session that is generated from your user credentials", args: { name: "string", generators: generators.listRoleArns, @@ -5776,7 +5776,7 @@ const completionSpec: Fig.Spec = { { name: "--template-body", description: - "The structure that contains the template body, with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information, see Template Anatomy in the CloudFormation User Guide. Conditional: You must specify only one of the following parameters: TemplateBody or TemplateURL\u2014or set UsePreviousTemplate to true", + "The structure that contains the template body, with a minimum length of 1 byte and a maximum length of 51,200 bytes. Conditional: You must specify only one of the following parameters: TemplateBody or TemplateURL\u2014or set UsePreviousTemplate to true", args: { name: "string", generators: generators.listFiles, @@ -5785,7 +5785,7 @@ const completionSpec: Fig.Spec = { { name: "--template-url", description: - "The location of the file that contains the template body. The URL must point to a template (maximum size: 460,800 bytes) that is located in an Amazon S3 bucket or a Systems Manager document. For more information, see Template Anatomy in the CloudFormation User Guide. 
Conditional: You must specify only one of the following parameters: TemplateBody or TemplateURL\u2014or set UsePreviousTemplate to true", + "The location of the file that contains the template body. The URL must point to a template (maximum size: 460,800 bytes) that is located in an Amazon S3 bucket or a Systems Manager document. Conditional: You must specify only one of the following parameters: TemplateBody or TemplateURL\u2014or set UsePreviousTemplate to true", args: { name: "string", }, @@ -5811,7 +5811,7 @@ const completionSpec: Fig.Spec = { { name: "--capabilities", description: - "In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to update the stack set and its associated stack instances. CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stacks sets, you must explicitly acknowledge this by specifying one of these capabilities. The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability. If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error. If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary. AWS::IAM::AccessKey AWS::IAM::Group AWS::IAM::InstanceProfile AWS::IAM::Policy AWS::IAM::Role AWS::IAM::User AWS::IAM::UserToGroupAddition For more information, see Acknowledging IAM Resources in CloudFormation Templates. CAPABILITY_AUTO_EXPAND Some templates reference macros. If your stack set template references one or more macros, you must update the stack set directly from the processed template, without first reviewing the resulting changes in a change set. To update the stack set directly, you must acknowledge this capability. For more information, see Using CloudFormation Macros to Perform Custom Processing on Templates. Stack sets with service-managed permissions do not currently support the use of macros in templates. (This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.) Even if you specify this capability for a stack set with service-managed permissions, if you reference a macro in your template the stack set operation will fail", + "In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to update the stack set and its associated stack instances. CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include resources that can affect permissions in your Amazon Web Services account, for example, by creating new IAM users. For those stacks sets, you must explicitly acknowledge this by specifying one of these capabilities. The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability. If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error. 
If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary. AWS::IAM::AccessKey AWS::IAM::Group AWS::IAM::InstanceProfile AWS::IAM::Policy AWS::IAM::Role AWS::IAM::User AWS::IAM::UserToGroupAddition For more information, see Acknowledging IAM resources in CloudFormation templates. CAPABILITY_AUTO_EXPAND Some templates reference macros. If your stack set template references one or more macros, you must update the stack set directly from the processed template, without first reviewing the resulting changes in a change set. To update the stack set directly, you must acknowledge this capability. For more information, see Using CloudFormation Macros to Perform Custom Processing on Templates. Stack sets with service-managed permissions do not currently support the use of macros in templates. (This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.) Even if you specify this capability for a stack set with service-managed permissions, if you reference a macro in your template the stack set operation will fail", args: { name: "list", isVariadic: true, @@ -5949,7 +5949,7 @@ const completionSpec: Fig.Spec = { { name: "update-termination-protection", description: - "Updates termination protection for the specified stack. If a user attempts to delete a stack with termination protection enabled, the operation fails and the stack remains unchanged. For more information, see Protecting a Stack From Being Deleted in the CloudFormation User Guide. For nested stacks, termination protection is set on the root stack and can't be changed directly on the nested stack", + "Updates termination protection for the specified stack. If a user attempts to delete a stack with termination protection enabled, the operation fails and the stack remains unchanged. For more information, see Protect a CloudFormation stack from being deleted in the CloudFormation User Guide. For nested stacks, termination protection is set on the root stack and can't be changed directly on the nested stack", options: [ { name: "--enable-termination-protection", @@ -5998,7 +5998,7 @@ const completionSpec: Fig.Spec = { { name: "--template-body", description: - "Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information, go to Template Anatomy in the CloudFormation User Guide. Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used", + "Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used", args: { name: "string", generators: generators.listFiles, @@ -6007,7 +6007,7 @@ const completionSpec: Fig.Spec = { { name: "--template-url", description: - "Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that is located in an Amazon S3 bucket or a Systems Manager document. For more information, go to Template Anatomy in the CloudFormation User Guide. The location for an Amazon S3 bucket must start with https://. Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used", + "Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that is located in an Amazon S3 bucket or a Systems Manager document. 
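// Editor's illustration (not part of the generated spec diff): update-termination-protection
// and validate-template, described in the surrounding hunks, are small standalone calls.
// A hedged sketch; the stack name and template path are assumed examples.
import { readFile } from "node:fs/promises";
import {
  CloudFormationClient,
  UpdateTerminationProtectionCommand,
  ValidateTemplateCommand,
} from "@aws-sdk/client-cloudformation";

export async function protectAndValidate() {
  const cfn = new CloudFormationClient({});

  // Prevent the stack from being deleted; nested stacks inherit this from the root stack.
  await cfn.send(
    new UpdateTerminationProtectionCommand({
      StackName: "my-stack",
      EnableTerminationProtection: true,
    })
  );

  // Validate a local template body before using it in create or update calls.
  const template = await readFile("template.yaml", "utf8");
  const result = await cfn.send(new ValidateTemplateCommand({ TemplateBody: template }));
  return { parameters: result.Parameters, capabilities: result.Capabilities };
}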
The location for an Amazon S3 bucket must start with https://. Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used", args: { name: "string", }, diff --git a/src/aws/codebuild.ts b/src/aws/codebuild.ts index e6d3bb716fcf..43c4d71a8a23 100644 --- a/src/aws/codebuild.ts +++ b/src/aws/codebuild.ts @@ -275,6 +275,13 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--proxy-configuration", + description: "The proxy configuration of the compute fleet", + args: { + name: "structure", + }, + }, { name: "--image-id", description: "The Amazon Machine Image (AMI) of the compute fleet", @@ -2908,6 +2915,13 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--proxy-configuration", + description: "The proxy configuration of the compute fleet", + args: { + name: "structure", + }, + }, { name: "--image-id", description: "The Amazon Machine Image (AMI) of the compute fleet", diff --git a/src/aws/codepipeline.ts b/src/aws/codepipeline.ts index f8e3c6f624dc..5604dec86db8 100644 --- a/src/aws/codepipeline.ts +++ b/src/aws/codepipeline.ts @@ -1,7 +1,7 @@ const completionSpec: Fig.Spec = { name: "codepipeline", description: - "CodePipeline Overview This is the CodePipeline API Reference. This guide provides descriptions of the actions and data types for CodePipeline. Some functionality for your pipeline can only be configured through the API. For more information, see the CodePipeline User Guide. You can use the CodePipeline API to work with pipelines, stages, actions, and transitions. Pipelines are models of automated release processes. Each pipeline is uniquely named, and consists of stages, actions, and transitions. You can work with pipelines by calling: CreatePipeline, which creates a uniquely named pipeline. DeletePipeline, which deletes the specified pipeline. GetPipeline, which returns information about the pipeline structure and pipeline metadata, including the pipeline Amazon Resource Name (ARN). GetPipelineExecution, which returns information about a specific execution of a pipeline. GetPipelineState, which returns information about the current state of the stages and actions of a pipeline. ListActionExecutions, which returns action-level details for past executions. The details include full stage and action-level details, including individual action duration, status, any errors that occurred during the execution, and input and output artifact location details. ListPipelines, which gets a summary of all of the pipelines associated with your account. ListPipelineExecutions, which gets a summary of the most recent executions for a pipeline. StartPipelineExecution, which runs the most recent revision of an artifact through the pipeline. StopPipelineExecution, which stops the specified pipeline execution from continuing through the pipeline. UpdatePipeline, which updates a pipeline with edits or changes to the structure of the pipeline. Pipelines include stages. Each stage contains one or more actions that must complete before the next stage begins. A stage results in success or failure. If a stage fails, the pipeline stops at that stage and remains stopped until either a new version of an artifact appears in the source location, or a user takes action to rerun the most recent artifact through the pipeline. 
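// Editor's note (illustration, not part of the generated spec diff): the codebuild hunks
// above add a --proxy-configuration option to the fleet commands. For reference, an option
// that takes a JSON "structure" argument is declared in a Fig spec as sketched below; the
// shorthand value in the comment is an assumed example, not the authoritative schema.
const proxyConfigurationOption: Fig.Option = {
  name: "--proxy-configuration",
  description: "The proxy configuration of the compute fleet",
  args: {
    name: "structure",
    // e.g. defaultBehavior=ALLOW_ALL,orderedProxyRules=[{type=DOMAIN,effect=DENY,entities=[example.com]}]
  },
};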
You can call GetPipelineState, which displays the status of a pipeline, including the status of stages in the pipeline, or GetPipeline, which returns the entire structure of the pipeline, including the stages of that pipeline. For more information about the structure of stages and actions, see CodePipeline Pipeline Structure Reference. Pipeline stages include actions that are categorized into categories such as source or build actions performed in a stage of a pipeline. For example, you can use a source action to import artifacts into a pipeline from a source such as Amazon S3. Like stages, you do not work with actions directly in most cases, but you do define and interact with actions when working with pipeline operations such as CreatePipeline and GetPipelineState. Valid action categories are: Source Build Test Deploy Approval Invoke Pipelines also include transitions, which allow the transition of artifacts from one stage to the next in a pipeline after the actions in one stage complete. You can work with transitions by calling: DisableStageTransition, which prevents artifacts from transitioning to the next stage in a pipeline. EnableStageTransition, which enables transition of artifacts between stages in a pipeline. Using the API to integrate with CodePipeline For third-party integrators or developers who want to create their own integrations with CodePipeline, the expected sequence varies from the standard API user. To integrate with CodePipeline, developers need to work with the following items: Jobs, which are instances of an action. For example, a job for a source action might import a revision of an artifact from a source. You can work with jobs by calling: AcknowledgeJob, which confirms whether a job worker has received the specified job. GetJobDetails, which returns the details of a job. PollForJobs, which determines whether there are any jobs to act on. PutJobFailureResult, which provides details of a job failure. PutJobSuccessResult, which provides details of a job success. Third party jobs, which are instances of an action created by a partner action and integrated into CodePipeline. Partner actions are created by members of the Amazon Web Services Partner Network. You can work with third party jobs by calling: AcknowledgeThirdPartyJob, which confirms whether a job worker has received the specified job. GetThirdPartyJobDetails, which requests the details of a job for a partner action. PollForThirdPartyJobs, which determines whether there are any jobs to act on. PutThirdPartyJobFailureResult, which provides details of a job failure. PutThirdPartyJobSuccessResult, which provides details of a job success", + "CodePipeline Overview This is the CodePipeline API Reference. This guide provides descriptions of the actions and data types for CodePipeline. Some functionality for your pipeline can only be configured through the API. For more information, see the CodePipeline User Guide. You can use the CodePipeline API to work with pipelines, stages, actions, and transitions. Pipelines are models of automated release processes. Each pipeline is uniquely named, and consists of stages, actions, and transitions. You can work with pipelines by calling: CreatePipeline, which creates a uniquely named pipeline. DeletePipeline, which deletes the specified pipeline. GetPipeline, which returns information about the pipeline structure and pipeline metadata, including the pipeline Amazon Resource Name (ARN). GetPipelineExecution, which returns information about a specific execution of a pipeline. 
GetPipelineState, which returns information about the current state of the stages and actions of a pipeline. ListActionExecutions, which returns action-level details for past executions. The details include full stage and action-level details, including individual action duration, status, any errors that occurred during the execution, and input and output artifact location details. ListPipelines, which gets a summary of all of the pipelines associated with your account. ListPipelineExecutions, which gets a summary of the most recent executions for a pipeline. StartPipelineExecution, which runs the most recent revision of an artifact through the pipeline. StopPipelineExecution, which stops the specified pipeline execution from continuing through the pipeline. UpdatePipeline, which updates a pipeline with edits or changes to the structure of the pipeline. Pipelines include stages. Each stage contains one or more actions that must complete before the next stage begins. A stage results in success or failure. If a stage fails, the pipeline stops at that stage and remains stopped until either a new version of an artifact appears in the source location, or a user takes action to rerun the most recent artifact through the pipeline. You can call GetPipelineState, which displays the status of a pipeline, including the status of stages in the pipeline, or GetPipeline, which returns the entire structure of the pipeline, including the stages of that pipeline. For more information about the structure of stages and actions, see CodePipeline Pipeline Structure Reference. Pipeline stages include actions that are categorized into categories such as source or build actions performed in a stage of a pipeline. For example, you can use a source action to import artifacts into a pipeline from a source such as Amazon S3. Like stages, you do not work with actions directly in most cases, but you do define and interact with actions when working with pipeline operations such as CreatePipeline and GetPipelineState. Valid action categories are: Source Build Test Deploy Approval Invoke Compute Pipelines also include transitions, which allow the transition of artifacts from one stage to the next in a pipeline after the actions in one stage complete. You can work with transitions by calling: DisableStageTransition, which prevents artifacts from transitioning to the next stage in a pipeline. EnableStageTransition, which enables transition of artifacts between stages in a pipeline. Using the API to integrate with CodePipeline For third-party integrators or developers who want to create their own integrations with CodePipeline, the expected sequence varies from the standard API user. To integrate with CodePipeline, developers need to work with the following items: Jobs, which are instances of an action. For example, a job for a source action might import a revision of an artifact from a source. You can work with jobs by calling: AcknowledgeJob, which confirms whether a job worker has received the specified job. GetJobDetails, which returns the details of a job. PollForJobs, which determines whether there are any jobs to act on. PutJobFailureResult, which provides details of a job failure. PutJobSuccessResult, which provides details of a job success. Third party jobs, which are instances of an action created by a partner action and integrated into CodePipeline. Partner actions are created by members of the Amazon Web Services Partner Network. 
You can work with third party jobs by calling: AcknowledgeThirdPartyJob, which confirms whether a job worker has received the specified job. GetThirdPartyJobDetails, which requests the details of a job for a partner action. PollForThirdPartyJobs, which determines whether there are any jobs to act on. PutThirdPartyJobFailureResult, which provides details of a job failure. PutThirdPartyJobSuccessResult, which provides details of a job success", subcommands: [ { name: "acknowledge-job", diff --git a/src/aws/dataexchange.ts b/src/aws/dataexchange.ts index 095ce48cf29c..e9e728ad5a8f 100644 --- a/src/aws/dataexchange.ts +++ b/src/aws/dataexchange.ts @@ -3,6 +3,37 @@ const completionSpec: Fig.Spec = { description: "AWS Data Exchange is a service that makes it easy for AWS customers to exchange data in the cloud. You can use the AWS Data Exchange APIs to create, update, manage, and access file-based data set in the AWS Cloud. As a subscriber, you can view and access the data sets that you have an entitlement to through a subscription. You can use the APIs to download or copy your entitled data sets to Amazon Simple Storage Service (Amazon S3) for use across a variety of AWS analytics and machine learning services. As a provider, you can create and manage your data sets that you would like to publish to a product. Being able to package and provide your data sets into products requires a few steps to determine eligibility. For more information, visit the AWS Data Exchange User Guide. A data set is a collection of data that can be changed or updated over time. Data sets can be updated using revisions, which represent a new version or incremental change to a data set. A revision contains one or more assets. An asset in AWS Data Exchange is a piece of data that can be stored as an Amazon S3 object, Redshift datashare, API Gateway API, AWS Lake Formation data permission, or Amazon S3 data access. The asset can be a structured data file, an image file, or some other data file. Jobs are asynchronous import or export operations used to create or copy assets", subcommands: [ + { + name: "accept-data-grant", + description: "This operation accepts a data grant", + options: [ + { + name: "--data-grant-arn", + description: + "The Amazon Resource Name (ARN) of the data grant to accept", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
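// Editor's illustration (not part of the generated spec diff): the updated codepipeline
// description above adds "Compute" to the valid action categories and walks through the
// pipeline APIs. A hedged sketch of the GetPipelineState call it mentions, using
// @aws-sdk/client-codepipeline; the pipeline name is an assumed example.
import { CodePipelineClient, GetPipelineStateCommand } from "@aws-sdk/client-codepipeline";

export async function printPipelineState() {
  const cp = new CodePipelineClient({});
  const state = await cp.send(new GetPipelineStateCommand({ name: "my-pipeline" }));
  for (const stage of state.stageStates ?? []) {
    // Each stage reports its latest execution status and the state of its actions.
    console.log(stage.stageName, stage.latestExecution?.status);
  }
}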
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "cancel-job", description: @@ -34,6 +65,81 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "create-data-grant", + description: "This operation creates a data grant", + options: [ + { + name: "--name", + description: "The name of the data grant", + args: { + name: "string", + }, + }, + { + name: "--grant-distribution-scope", + description: "The distribution scope of the data grant", + args: { + name: "string", + }, + }, + { + name: "--receiver-principal", + description: + "The Amazon Web Services account ID of the data grant receiver", + args: { + name: "string", + }, + }, + { + name: "--source-data-set-id", + description: "The ID of the data set used to create the data grant", + args: { + name: "string", + }, + }, + { + name: "--ends-at", + description: + "The timestamp of when access to the associated data set ends", + args: { + name: "timestamp", + }, + }, + { + name: "--description", + description: "The description of the data grant", + args: { + name: "string", + }, + }, + { + name: "--tags", + description: + "The tags to add to the data grant. A tag is a key-value pair", + args: { + name: "map", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "create-data-set", description: "This operation creates a data set", @@ -250,6 +356,36 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "delete-data-grant", + description: "This operation deletes a data grant", + options: [ + { + name: "--data-grant-id", + description: "The ID of the data grant to delete", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "delete-data-set", description: "This operation deletes a data set", @@ -391,6 +527,36 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "get-data-grant", + description: "This operation returns information about a data grant", + options: [ + { + name: "--data-grant-id", + description: "The ID of the data grant", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "get-data-set", description: "This operation returns information about a data set", @@ -481,6 +647,37 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "get-received-data-grant", + description: + "This operation returns information about a received data grant", + options: [ + { + name: "--data-grant-arn", + description: "The Amazon Resource Name (ARN) of the data grant", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "get-revision", description: "This operation returns information about a revision", @@ -518,6 +715,69 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "list-data-grants", + description: "This operation returns information about all data grants", + options: [ + { + name: "--max-results", + description: + "The maximum number of results to be included in the next page", + args: { + name: "integer", + }, + }, + { + name: "--next-token", + description: + "The pagination token used to retrieve the next page of results for this operation", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "list-data-set-revisions", description: @@ -592,7 +852,7 @@ const completionSpec: Fig.Spec = { { name: "list-data-sets", description: - "This operation lists your data sets. When listing by origin OWNED, results are sorted by CreatedAt in descending order. When listing by origin ENTITLED, there is no order and the maxResults parameter is ignored", + "This operation lists your data sets. When listing by origin OWNED, results are sorted by CreatedAt in descending order. 
When listing by origin ENTITLED, there is no order", options: [ { name: "--max-results", @@ -809,6 +1069,77 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "list-received-data-grants", + description: + "This operation returns information about all received data grants", + options: [ + { + name: "--max-results", + description: + "The maximum number of results to be included in the next page", + args: { + name: "integer", + }, + }, + { + name: "--next-token", + description: + "The pagination token used to retrieve the next page of results for this operation", + args: { + name: "string", + }, + }, + { + name: "--acceptance-state", + description: "The acceptance state of the data grants to list", + args: { + name: "list", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "list-revision-assets", description: diff --git a/src/aws/dms.ts b/src/aws/dms.ts index 6e022f61dbb4..e9b219a4cdc4 100644 --- a/src/aws/dms.ts +++ b/src/aws/dms.ts @@ -154,6 +154,100 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "create-data-migration", + description: "Creates a data migration using the provided settings", + options: [ + { + name: "--data-migration-name", + description: + "A user-friendly name for the data migration. 
Data migration names have the following constraints: Must begin with a letter, and can only contain ASCII letters, digits, and hyphens. Can't end with a hyphen or contain two consecutive hyphens. Length must be from 1 to 255 characters", + args: { + name: "string", + }, + }, + { + name: "--migration-project-identifier", + description: "An identifier for the migration project", + args: { + name: "string", + }, + }, + { + name: "--data-migration-type", + description: + "Specifies if the data migration is full-load only, change data capture (CDC) only, or full-load and CDC", + args: { + name: "string", + }, + }, + { + name: "--service-access-role-arn", + description: + "The Amazon Resource Name (ARN) for the service access role that you want to use to create the data migration", + args: { + name: "string", + }, + }, + { + name: "--enable-cloudwatch-logs", + description: + "Specifies whether to enable CloudWatch logs for the data migration", + }, + { + name: "--no-enable-cloudwatch-logs", + description: + "Specifies whether to enable CloudWatch logs for the data migration", + }, + { + name: "--source-data-settings", + description: "Specifies information about the source data provider", + args: { + name: "list", + }, + }, + { + name: "--number-of-jobs", + description: + "The number of parallel jobs that trigger parallel threads to unload the tables from the source, and then load them to the target", + args: { + name: "integer", + }, + }, + { + name: "--tags", + description: "One or more tags to be assigned to the data migration", + args: { + name: "list", + }, + }, + { + name: "--selection-rules", + description: + "An optional JSON string specifying what tables, views, and schemas to include or exclude from the migration", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "create-data-provider", description: @@ -238,7 +332,7 @@ const completionSpec: Fig.Spec = { { name: "--engine-name", description: - 'The type of engine for the endpoint. Valid values, depending on the EndpointType value, include "mysql", "oracle", "postgres", "mariadb", "aurora", "aurora-postgresql", "opensearch", "redshift", "s3", "db2", "db2-zos", "azuredb", "sybase", "dynamodb", "mongodb", "kinesis", "kafka", "elasticsearch", "docdb", "sqlserver", "neptune", and "babelfish"', + 'The type of engine for the endpoint. 
Valid values, depending on the EndpointType value, include "mysql", "oracle", "postgres", "mariadb", "aurora", "aurora-postgresql", "opensearch", "redshift", "s3", "db2", "db2-zos", "azuredb", "sybase", "dynamodb", "mongodb", "kinesis", "kafka", "elasticsearch", "docdb", "sqlserver", "neptune", "babelfish", redshift-serverless, aurora-serverless, aurora-postgresql-serverless, gcp-mysql, azure-sql-managed-instance, redis, dms-transfer', args: { name: "string", }, @@ -1101,7 +1195,7 @@ const completionSpec: Fig.Spec = { { name: "--replication-subnet-group-identifier", description: - 'The name for the replication subnet group. This value is stored as a lowercase string. Constraints: Must contain no more than 255 alphanumeric characters, periods, spaces, underscores, or hyphens. Must not be "default". Example: mySubnetgroup', + 'The name for the replication subnet group. This value is stored as a lowercase string. Constraints: Must contain no more than 255 alphanumeric characters, periods, underscores, or hyphens. Must not be "default". Example: mySubnetgroup', args: { name: "string", }, @@ -1344,6 +1438,37 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "delete-data-migration", + description: "Deletes the specified data migration", + options: [ + { + name: "--data-migration-identifier", + description: + "The identifier (name or ARN) of the data migration to delete", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "delete-data-provider", description: @@ -1998,6 +2123,96 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "describe-data-migrations", + description: "Returns information about data migrations", + options: [ + { + name: "--filters", + description: "Filters applied to the data migrations", + args: { + name: "list", + }, + }, + { + name: "--max-records", + description: + "The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved", + args: { + name: "integer", + }, + }, + { + name: "--marker", + description: + "An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords", + args: { + name: "string", + }, + }, + { + name: "--without-settings", + description: + "An option to set to avoid returning information about settings. Use this to reduce overhead when setting information is too large. 
To use this option, choose true; otherwise, choose false (the default)", + }, + { + name: "--no-without-settings", + description: + "An option to set to avoid returning information about settings. Use this to reduce overhead when setting information is too large. To use this option, choose true; otherwise, choose false (the default)", + }, + { + name: "--without-statistics", + description: + "An option to set to avoid returning information about statistics. Use this to reduce overhead when statistics information is too large. To use this option, choose true; otherwise, choose false (the default)", + }, + { + name: "--no-without-statistics", + description: + "An option to set to avoid returning information about statistics. Use this to reduce overhead when statistics information is too large. To use this option, choose true; otherwise, choose false (the default)", + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "describe-data-providers", description: @@ -4341,6 +4556,92 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "modify-data-migration", + description: "Modifies an existing DMS data migration", + options: [ + { + name: "--data-migration-identifier", + description: + "The identifier (name or ARN) of the data migration to modify", + args: { + name: "string", + }, + }, + { + name: "--data-migration-name", + description: "The new name for the data migration", + args: { + name: "string", + }, + }, + { + name: "--enable-cloudwatch-logs", + description: + "Whether to enable Cloudwatch logs for the data migration", + }, + { + name: "--no-enable-cloudwatch-logs", + description: + "Whether to enable Cloudwatch logs for the data migration", + }, + { + name: "--service-access-role-arn", + description: "The new service access role ARN for the data migration", + args: { + name: "string", + }, + }, + { + name: "--data-migration-type", + description: "The new migration type for the data migration", + args: { + name: "string", + }, + }, + { + name: "--source-data-settings", + description: + "The new information about the source data provider for the data migration", + args: { + name: "list", + }, + }, + { + name: "--number-of-jobs", + description: + "The number of parallel jobs that trigger parallel threads to unload the tables from the source, and then load them to the target", + args: { + name: "integer", + }, + }, + { + name: "--selection-rules", + description: + "A JSON-formatted string that defines what objects to include and exclude from the migration", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "modify-data-provider", description: @@ -5599,6 +5900,45 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "start-data-migration", + description: "Starts the specified data migration", + options: [ + { + name: "--data-migration-identifier", + description: + "The identifier (name or ARN) of the data migration to start", + args: { + name: "string", + }, + }, + { + name: "--start-type", + description: + "Specifies the start type for the data migration. Valid values include start-replication, reload-target, and resume-processing", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. 
If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "start-extension-pack-association", description: @@ -6143,6 +6483,45 @@ const completionSpec: Fig.Spec = { name: "list", }, }, + { + name: "--tags", + description: + "One or more tags to be assigned to the premigration assessment run that you want to start", + args: { + name: "list", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "stop-data-migration", + description: "Stops the specified data migration", + options: [ + { + name: "--data-migration-identifier", + description: + "The identifier (name or ARN) of the data migration to stop", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: diff --git a/src/aws/ecs.ts b/src/aws/ecs.ts index b3579a90b860..a869c69ea688 100644 --- a/src/aws/ecs.ts +++ b/src/aws/ecs.ts @@ -513,7 +513,7 @@ const completionSpec: Fig.Spec = { { name: "create-service", description: - "Runs and maintains your desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, use UpdateService. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind one or more load balancers. The load balancers distribute traffic across the tasks that are associated with the service. For more information, see Service load balancing in the Amazon Elastic Container Service Developer Guide. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. volumeConfigurations is only supported for REPLICA service and not DAEMON service. 
For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer. There are two service scheduler strategies available: REPLICA - The replica scheduling strategy places and maintains your desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide. DAEMON - The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. The service scheduler also evaluates the task placement constraints for running tasks. It also stops tasks that don't meet the placement constraints. When using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide. You can optionally specify a deployment configuration for your service. The deployment is initiated by changing properties. For example, the deployment might be initiated by the task definition or by your desired count of a service. You can use UpdateService. The default value for a replica service for minimumHealthyPercent is 100%. The default value for a daemon service for minimumHealthyPercent is 0%. If a service uses the ECS deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment. Specifically, it represents it as a percentage of your desired number of tasks (rounded up to the nearest integer). This happens when any of your container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Using this parameter, you can deploy without using additional cluster capacity. For example, if you set your service to have desired number of four tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks to free up cluster capacity before starting two new tasks. If they're in the RUNNING state, tasks for services that don't use a load balancer are considered healthy . If they're in the RUNNING state and reported as healthy by the load balancer, tasks for services that do use a load balancer are considered healthy . The default value for minimum healthy percent is 100%. If a service uses the ECS deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING or PENDING state during a deployment. Specifically, it represents it as a percentage of the desired number of tasks (rounded down to the nearest integer). This happens when any of your container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Using this parameter, you can define the deployment batch size. 
For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%. If a service uses either the CODE_DEPLOY or EXTERNAL deployment controller types and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are used only to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING state. This is while the container instances are in the DRAINING state. If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values aren't used. This is the case even if they're currently visible when describing your service. When creating a service that uses the EXTERNAL deployment controller, you can specify only parameters that aren't controlled at the task set level. The only required parameter is the service name. You control your services using the CreateTaskSet. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide. When the service scheduler launches new tasks, it determines task placement. For information about task placement and task placement strategies, see Amazon ECS task placement in the Amazon Elastic Container Service Developer Guide Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service", + "Runs and maintains your desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, use UpdateService. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. Amazon Elastic Inference (EI) is no longer available to customers. In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind one or more load balancers. The load balancers distribute traffic across the tasks that are associated with the service. For more information, see Service load balancing in the Amazon Elastic Container Service Developer Guide. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. volumeConfigurations is only supported for REPLICA service and not DAEMON service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer. 
There are two service scheduler strategies available: REPLICA - The replica scheduling strategy places and maintains your desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide. DAEMON - The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. The service scheduler also evaluates the task placement constraints for running tasks. It also stops tasks that don't meet the placement constraints. When using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide. You can optionally specify a deployment configuration for your service. The deployment is initiated by changing properties. For example, the deployment might be initiated by the task definition or by your desired count of a service. You can use UpdateService. The default value for a replica service for minimumHealthyPercent is 100%. The default value for a daemon service for minimumHealthyPercent is 0%. If a service uses the ECS deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment. Specifically, it represents it as a percentage of your desired number of tasks (rounded up to the nearest integer). This happens when any of your container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Using this parameter, you can deploy without using additional cluster capacity. For example, if you set your service to have desired number of four tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks to free up cluster capacity before starting two new tasks. If they're in the RUNNING state, tasks for services that don't use a load balancer are considered healthy . If they're in the RUNNING state and reported as healthy by the load balancer, tasks for services that do use a load balancer are considered healthy . The default value for minimum healthy percent is 100%. If a service uses the ECS deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING or PENDING state during a deployment. Specifically, it represents it as a percentage of the desired number of tasks (rounded down to the nearest integer). This happens when any of your container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Using this parameter, you can define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%. 
If a service uses either the CODE_DEPLOY or EXTERNAL deployment controller types and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are used only to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING state. This is while the container instances are in the DRAINING state. If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values aren't used. This is the case even if they're currently visible when describing your service. When creating a service that uses the EXTERNAL deployment controller, you can specify only parameters that aren't controlled at the task set level. The only required parameter is the service name. You control your services using the CreateTaskSet. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide. When the service scheduler launches new tasks, it determines task placement. For information about task placement and task placement strategies, see Amazon ECS task placement in the Amazon Elastic Container Service Developer Guide", options: [ { name: "--cluster", @@ -3116,7 +3116,7 @@ const completionSpec: Fig.Spec = { { name: "run-task", description: - "Starts a new task using the specified task definition. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. You can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places tasks using placement constraints and placement strategies. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide. Alternatively, you can use StartTask to use your own scheduler or place tasks manually on specific container instances. Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. The Amazon ECS API follows an eventual consistency model. This is because of the distributed nature of the system supporting the API. This means that the result of an API command you run that affects your Amazon ECS resources might not be immediately visible to all subsequent commands you run. Keep this in mind when you carry out an API command that immediately follows a previous API command. To manage eventual consistency, you can do the following: Confirm the state of the resource before you run a command to modify it. Run the DescribeTasks command using an exponential backoff algorithm to ensure that you allow enough time for the previous command to propagate through the system. To do this, run the DescribeTasks command repeatedly, starting with a couple of seconds of wait time and increasing gradually up to five minutes of wait time. 
Add wait time between subsequent commands, even if the DescribeTasks command returns an accurate response. Apply an exponential backoff algorithm starting with a couple of seconds of wait time, and increase gradually up to about five minutes of wait time", + "Starts a new task using the specified task definition. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. Amazon Elastic Inference (EI) is no longer available to customers. You can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places tasks using placement constraints and placement strategies. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide. Alternatively, you can use StartTask to use your own scheduler or place tasks manually on specific container instances. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. The Amazon ECS API follows an eventual consistency model. This is because of the distributed nature of the system supporting the API. This means that the result of an API command you run that affects your Amazon ECS resources might not be immediately visible to all subsequent commands you run. Keep this in mind when you carry out an API command that immediately follows a previous API command. To manage eventual consistency, you can do the following: Confirm the state of the resource before you run a command to modify it. Run the DescribeTasks command using an exponential backoff algorithm to ensure that you allow enough time for the previous command to propagate through the system. To do this, run the DescribeTasks command repeatedly, starting with a couple of seconds of wait time and increasing gradually up to five minutes of wait time. Add wait time between subsequent commands, even if the DescribeTasks command returns an accurate response. Apply an exponential backoff algorithm starting with a couple of seconds of wait time, and increase gradually up to about five minutes of wait time", options: [ { name: "--capacity-provider-strategy", @@ -3244,7 +3244,7 @@ const completionSpec: Fig.Spec = { { name: "--reference-id", description: - "The reference ID to use for the task. The reference ID can have a maximum length of 1024 characters", + "This parameter is only used by Amazon ECS. It is not intended for use by customers", args: { name: "string", }, @@ -3320,7 +3320,7 @@ const completionSpec: Fig.Spec = { { name: "start-task", description: - "Starts a new task from the specified task definition on the specified container instance or instances. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. 
However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service. Alternatively, you can useRunTask to place tasks for you. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide", + "Starts a new task from the specified task definition on the specified container instance or instances. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. Amazon Elastic Inference (EI) is no longer available to customers. Alternatively, you can useRunTask to place tasks for you. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide", options: [ { name: "--cluster", @@ -3393,7 +3393,8 @@ const completionSpec: Fig.Spec = { }, { name: "--reference-id", - description: "The reference ID to use for the task", + description: + "This parameter is only used by Amazon ECS. It is not intended for use by customers", args: { name: "string", }, diff --git a/src/aws/eks.ts b/src/aws/eks.ts index 2c4470d3a651..e2271f2bd01d 100644 --- a/src/aws/eks.ts +++ b/src/aws/eks.ts @@ -666,7 +666,8 @@ const completionSpec: Fig.Spec = { options: [ { name: "--name", - description: "The unique name to give to your cluster", + description: + "The unique name to give to your cluster. The name can contain only alphanumeric characters (case-sensitive), hyphens, and underscores. It must start with an alphanumeric character and can't be longer than 100 characters. The name must be unique within the Amazon Web Services Region and Amazon Web Services account that you're creating the cluster in", args: { name: "string", }, @@ -761,6 +762,14 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--zonal-shift-config", + description: + "Enable or disable ARC zonal shift for the cluster. If zonal shift is enabled, Amazon Web Services configures zonal autoshift for the cluster. Zonal shift is a feature of Amazon Application Recovery Controller (ARC). ARC zonal shift is designed to be a temporary measure that allows you to move traffic for a resource away from an impaired AZ until the zonal shift expires or you cancel it. You can extend the zonal shift if necessary. You can start a zonal shift for an EKS cluster, or you can allow Amazon Web Services to do it for you by enabling zonal autoshift. This shift updates the flow of east-to-west network traffic in your cluster to only consider network endpoints for Pods running on worker nodes in healthy AZs. Additionally, any ALB or NLB handling ingress traffic for applications in your EKS cluster will automatically route traffic to targets in the healthy AZs. 
For more information about zonal shift in EKS, see Learn about Amazon Application Recovery Controller (ARC) Zonal Shift in Amazon EKS in the Amazon EKS User Guide", + args: { + name: "structure", + }, + }, { name: "--kubernetes-version", description: @@ -3386,7 +3395,7 @@ const completionSpec: Fig.Spec = { { name: "update-cluster-config", description: - "Updates an Amazon EKS cluster configuration. Your cluster continues to function during the update. The response output includes an update ID that you can use to track the status of your cluster update with DescribeUpdate\"/>. You can use this API operation to enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS Cluster control plane logs in the Amazon EKS User Guide . CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see CloudWatch Pricing. You can also use this API operation to enable or disable public and private access to your cluster's Kubernetes API server endpoint. By default, public access is enabled, and private access is disabled. For more information, see Amazon EKS cluster endpoint access control in the Amazon EKS User Guide . You can also use this API operation to choose different subnets and security groups for the cluster. You must specify at least two subnets that are in different Availability Zones. You can't change which VPC the subnets are from, the subnets must be in the same VPC as the subnets that the cluster was created with. For more information about the VPC requirements, see https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html in the Amazon EKS User Guide . Cluster updates are asynchronous, and they should finish within a few minutes. During an update, the cluster status moves to UPDATING (this status transition is eventually consistent). When the update is complete (either Failed or Successful), the cluster status moves to Active", + "Updates an Amazon EKS cluster configuration. Your cluster continues to function during the update. The response output includes an update ID that you can use to track the status of your cluster update with DescribeUpdate\"/>. You can use this API operation to enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS Cluster control plane logs in the Amazon EKS User Guide . CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see CloudWatch Pricing. You can also use this API operation to enable or disable public and private access to your cluster's Kubernetes API server endpoint. By default, public access is enabled, and private access is disabled. For more information, see Amazon EKS cluster endpoint access control in the Amazon EKS User Guide . You can also use this API operation to choose different subnets and security groups for the cluster. You must specify at least two subnets that are in different Availability Zones. You can't change which VPC the subnets are from, the subnets must be in the same VPC as the subnets that the cluster was created with. For more information about the VPC requirements, see https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html in the Amazon EKS User Guide . 
You can also use this API operation to enable or disable ARC zonal shift. If zonal shift is enabled, Amazon Web Services configures zonal autoshift for the cluster. Cluster updates are asynchronous, and they should finish within a few minutes. During an update, the cluster status moves to UPDATING (this status transition is eventually consistent). When the update is complete (either Failed or Successful), the cluster status moves to Active", options: [ { name: "--name", @@ -3435,6 +3444,14 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--zonal-shift-config", + description: + "Enable or disable ARC zonal shift for the cluster. If zonal shift is enabled, Amazon Web Services configures zonal autoshift for the cluster. Zonal shift is a feature of Amazon Application Recovery Controller (ARC). ARC zonal shift is designed to be a temporary measure that allows you to move traffic for a resource away from an impaired AZ until the zonal shift expires or you cancel it. You can extend the zonal shift if necessary. You can start a zonal shift for an EKS cluster, or you can allow Amazon Web Services to do it for you by enabling zonal autoshift. This shift updates the flow of east-to-west network traffic in your cluster to only consider network endpoints for Pods running on worker nodes in healthy AZs. Additionally, any ALB or NLB handling ingress traffic for applications in your EKS cluster will automatically route traffic to targets in the healthy AZs. For more information about zonal shift in EKS, see Learn about Amazon Application Recovery Controller (ARC) Zonal Shift in Amazon EKS in the Amazon EKS User Guide", + args: { + name: "structure", + }, + }, { name: "--cli-input-json", description: diff --git a/src/aws/elastic-inference.ts b/src/aws/elastic-inference.ts index b672f23ca211..26105a8b7ef1 100644 --- a/src/aws/elastic-inference.ts +++ b/src/aws/elastic-inference.ts @@ -1,12 +1,12 @@ const completionSpec: Fig.Spec = { name: "elastic-inference", description: - "Elastic Inference public APIs. February 15, 2023: Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service", + "Amazon Elastic Inference is no longer available. Elastic Inference public APIs", subcommands: [ { name: "describe-accelerator-offerings", description: - "Describes the locations in which a given accelerator type or set of types is present in a given region. February 15, 2023: Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service", + "Amazon Elastic Inference is no longer available. 
Describes the locations in which a given accelerator type or set of types is present in a given region", options: [ { name: "--location-type", @@ -45,7 +45,7 @@ const completionSpec: Fig.Spec = { { name: "describe-accelerator-types", description: - "Describes the accelerator types available in a given region, as well as their characteristics, such as memory and throughput. February 15, 2023: Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service", + "Amazon Elastic Inference is no longer available. Describes the accelerator types available in a given region, as well as their characteristics, such as memory and throughput", options: [ { name: "--cli-input-json", @@ -69,7 +69,7 @@ const completionSpec: Fig.Spec = { { name: "describe-accelerators", description: - "Describes information over a provided set of accelerators belonging to an account. February 15, 2023: Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service", + "Amazon Elastic Inference is no longer available. Describes information over a provided set of accelerators belonging to an account", options: [ { name: "--accelerator-ids", @@ -148,7 +148,7 @@ const completionSpec: Fig.Spec = { { name: "list-tags-for-resource", description: - "Returns all tags of an Elastic Inference Accelerator. February 15, 2023: Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service", + "Amazon Elastic Inference is no longer available. Returns all tags of an Elastic Inference Accelerator", options: [ { name: "--resource-arn", @@ -180,7 +180,7 @@ const completionSpec: Fig.Spec = { { name: "tag-resource", description: - "Adds the specified tags to an Elastic Inference Accelerator. February 15, 2023: Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. 
However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service", + "Amazon Elastic Inference is no longer available. Adds the specified tags to an Elastic Inference Accelerator", options: [ { name: "--resource-arn", @@ -218,7 +218,7 @@ const completionSpec: Fig.Spec = { { name: "untag-resource", description: - "Removes the specified tags from an Elastic Inference Accelerator. February 15, 2023: Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service", + "Amazon Elastic Inference is no longer available. Removes the specified tags from an Elastic Inference Accelerator", options: [ { name: "--resource-arn", diff --git a/src/aws/fms.ts b/src/aws/fms.ts index 5c1385cb783e..ea39ef0150f6 100644 --- a/src/aws/fms.ts +++ b/src/aws/fms.ts @@ -736,7 +736,7 @@ const completionSpec: Fig.Spec = { { name: "--policy-id", description: - "The ID of the Firewall Manager policy that you want the details for. You can get violation details for the following policy types: DNS Firewall Imported Network Firewall Network Firewall Security group content audit Network ACL Third-party firewall", + "The ID of the Firewall Manager policy that you want the details for. You can get violation details for the following policy types: WAF DNS Firewall Imported Network Firewall Network Firewall Security group content audit Network ACL Third-party firewall", args: { name: "string", }, @@ -759,7 +759,7 @@ const completionSpec: Fig.Spec = { { name: "--resource-type", description: - "The resource type. This is in the format shown in the Amazon Web Services Resource Types Reference. Supported resource types are: AWS::EC2::Instance, AWS::EC2::NetworkInterface, AWS::EC2::SecurityGroup, AWS::NetworkFirewall::FirewallPolicy, and AWS::EC2::Subnet", + "The resource type. This is in the format shown in the Amazon Web Services Resource Types Reference. Supported resource types are: AWS::WAFv2::WebACL, AWS::EC2::Instance, AWS::EC2::NetworkInterface, AWS::EC2::SecurityGroup, AWS::NetworkFirewall::FirewallPolicy, and AWS::EC2::Subnet", args: { name: "string", }, diff --git a/src/aws/imagebuilder.ts b/src/aws/imagebuilder.ts index b189d3f54999..83d3132ba26b 100644 --- a/src/aws/imagebuilder.ts +++ b/src/aws/imagebuilder.ts @@ -304,7 +304,7 @@ const completionSpec: Fig.Spec = { { name: "--kms-key-id", description: - "Identifies which KMS key is used to encrypt the container image", + "Identifies which KMS key is used to encrypt the Dockerfile template", args: { name: "string", }, @@ -831,7 +831,7 @@ const completionSpec: Fig.Spec = { { name: "--resource-tags", description: - "The tags attached to the resource created by Image Builder", + "The metadata tags to assign to the Amazon EC2 instance that Image Builder launches during the build process. 
Tags are formatted as key value pairs", args: { name: "map", }, @@ -846,11 +846,20 @@ const completionSpec: Fig.Spec = { }, { name: "--tags", - description: "The tags of the infrastructure configuration", + description: + "The metadata tags to assign to the infrastructure configuration resource that Image Builder creates as output. Tags are formatted as key value pairs", args: { name: "map", }, }, + { + name: "--placement", + description: + "The instance placement settings that define where the instances that are launched from your image will run", + args: { + name: "structure", + }, + }, { name: "--client-token", description: @@ -2054,7 +2063,7 @@ const completionSpec: Fig.Spec = { { name: "list-component-build-versions", description: - "Returns the list of component build versions for the specified semantic version. The semantic version has four nodes: ../. You can assign values for the first three, and can filter on all of them. Filtering: With semantic versioning, you have the flexibility to use wildcards (x) to specify the most recent versions or nodes when selecting the base image or components for your recipe. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards", + "Returns the list of component build versions for the specified component version Amazon Resource Name (ARN)", options: [ { name: "--component-version-arn", @@ -3826,14 +3835,6 @@ const completionSpec: Fig.Spec = { name: "string", }, }, - { - name: "--client-token", - description: - "Unique, case-sensitive identifier you provide to ensure idempotency of the request. For more information, see Ensuring idempotency in the Amazon EC2 API Reference", - args: { - name: "string", - }, - }, { name: "--resource-tags", description: @@ -3850,6 +3851,22 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--placement", + description: + "The instance placement settings that define where the instances that are launched from your image will run", + args: { + name: "structure", + }, + }, + { + name: "--client-token", + description: + "Unique, case-sensitive identifier you provide to ensure idempotency of the request. For more information, see Ensuring idempotency in the Amazon EC2 API Reference", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: diff --git a/src/aws/ivs.ts b/src/aws/ivs.ts index 5acb15b0364c..36b92f83e1d6 100644 --- a/src/aws/ivs.ts +++ b/src/aws/ivs.ts @@ -101,11 +101,24 @@ const completionSpec: Fig.Spec = { "Creates a new channel and an associated stream key to start streaming", options: [ { - name: "--name", - description: "Channel name", - args: { - name: "string", - }, + name: "--authorized", + description: + "Whether the channel is private (enabled for playback authorization). Default: false", + }, + { + name: "--no-authorized", + description: + "Whether the channel is private (enabled for playback authorization). Default: false", + }, + { + name: "--insecure-ingest", + description: + "Whether the channel allows insecure RTMP and SRT ingest. Default: false", + }, + { + name: "--no-insecure-ingest", + description: + "Whether the channel allows insecure RTMP and SRT ingest. Default: false", }, { name: "--latency-mode", @@ -116,22 +129,27 @@ const completionSpec: Fig.Spec = { }, }, { - name: "--type", - description: - "Channel type, which determines the allowable resolution and bitrate. If you exceed the allowable input resolution or bitrate, the stream probably will disconnect immediately. Default: STANDARD. 
For details, see Channel Types", + name: "--name", + description: "Channel name", args: { name: "string", }, }, { - name: "--authorized", + name: "--playback-restriction-policy-arn", description: - "Whether the channel is private (enabled for playback authorization). Default: false", + 'Playback-restriction-policy ARN. A valid ARN value here both specifies the ARN and enables playback restriction. Default: "" (empty string, no playback restriction policy is applied)', + args: { + name: "string", + }, }, { - name: "--no-authorized", + name: "--preset", description: - "Whether the channel is private (enabled for playback authorization). Default: false", + 'Optional transcode preset for the channel. This is selectable only for ADVANCED_HD and ADVANCED_SD channel types. For those channel types, the default preset is HIGHER_BANDWIDTH_DELIVERY. For other channel types (BASIC and STANDARD), preset is the empty string ("")', + args: { + name: "string", + }, }, { name: "--recording-configuration-arn", @@ -150,27 +168,9 @@ const completionSpec: Fig.Spec = { }, }, { - name: "--insecure-ingest", - description: - "Whether the channel allows insecure RTMP and SRT ingest. Default: false", - }, - { - name: "--no-insecure-ingest", - description: - "Whether the channel allows insecure RTMP and SRT ingest. Default: false", - }, - { - name: "--preset", - description: - 'Optional transcode preset for the channel. This is selectable only for ADVANCED_HD and ADVANCED_SD channel types. For those channel types, the default preset is HIGHER_BANDWIDTH_DELIVERY. For other channel types (BASIC and STANDARD), preset is the empty string ("")', - args: { - name: "string", - }, - }, - { - name: "--playback-restriction-policy-arn", + name: "--type", description: - 'Playback-restriction-policy ARN. A valid ARN value here both specifies the ARN and enables playback restriction. Default: "" (empty string, no playback restriction policy is applied)', + "Channel type, which determines the allowable resolution and bitrate. If you exceed the allowable input resolution or bitrate, the stream probably will disconnect immediately. Default: STANDARD. For details, see Channel Types", args: { name: "string", }, @@ -266,49 +266,49 @@ const completionSpec: Fig.Spec = { "Creates a new recording configuration, used to enable recording to Amazon S3. Known issue: In the us-east-1 region, if you use the Amazon Web Services CLI to create a recording configuration, it returns success even if the S3 bucket is in a different region. In this case, the state of the recording configuration is CREATE_FAILED (instead of ACTIVE). (In other regions, the CLI correctly returns failure if the bucket is in a different region.) Workaround: Ensure that your S3 bucket is in the same region as the recording configuration. If you create a recording configuration in a different region as your S3 bucket, delete that recording configuration and create a new one with an S3 bucket from the correct region", options: [ { - name: "--name", + name: "--destination-configuration", description: - "Recording-configuration name. The value does not need to be unique", + "A complex type that contains a destination configuration for where recorded video will be stored", args: { - name: "string", + name: "structure", }, }, { - name: "--destination-configuration", + name: "--name", description: - "A complex type that contains a destination configuration for where recorded video will be stored", + "Recording-configuration name. 
The value does not need to be unique", args: { - name: "structure", + name: "string", }, }, { - name: "--tags", + name: "--recording-reconnect-window-seconds", description: - 'Array of 1-50 maps, each of the form string:string (key:value). See Best practices and strategies in Tagging Amazon Web Services Resources and Tag Editor for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no service-specific constraints beyond what is documented there', + "If a broadcast disconnects and then reconnects within the specified interval, the multiple streams will be considered a single broadcast and merged together. Default: 0", args: { - name: "map", + name: "integer", }, }, { - name: "--thumbnail-configuration", + name: "--rendition-configuration", description: - "A complex type that allows you to enable/disable the recording of thumbnails for a live session and modify the interval at which thumbnails are generated for the live session", + "Object that describes which renditions should be recorded for a stream", args: { name: "structure", }, }, { - name: "--recording-reconnect-window-seconds", + name: "--tags", description: - "If a broadcast disconnects and then reconnects within the specified interval, the multiple streams will be considered a single broadcast and merged together. Default: 0", + 'Array of 1-50 maps, each of the form string:string (key:value). See Best practices and strategies in Tagging Amazon Web Services Resources and Tag Editor for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no service-specific constraints beyond what is documented there', args: { - name: "integer", + name: "map", }, }, { - name: "--rendition-configuration", + name: "--thumbnail-configuration", description: - "Object that describes which renditions should be recorded for a stream", + "A complex type that allows you to enable/disable the recording of thumbnails for a live session and modify the interval at which thumbnails are generated for the live session", args: { name: "structure", }, @@ -753,16 +753,16 @@ const completionSpec: Fig.Spec = { "Imports the public portion of a new key pair and returns its arn and fingerprint. The privateKey can then be used to generate viewer authorization tokens, to grant viewers access to private channels. For more information, see Setting Up Private Channels in the Amazon IVS User Guide", options: [ { - name: "--public-key-material", - description: "The public portion of a customer-generated key pair", + name: "--name", + description: + "Playback-key-pair name. The value does not need to be unique", args: { name: "string", }, }, { - name: "--name", - description: - "Playback-key-pair name. The value does not need to be unique", + name: "--public-key-material", + description: "The public portion of a customer-generated key pair", args: { name: "string", }, @@ -806,14 +806,6 @@ const completionSpec: Fig.Spec = { name: "string", }, }, - { - name: "--filter-by-recording-configuration-arn", - description: - "Filters the channel list to match the specified recording-configuration ARN", - args: { - name: "string", - }, - }, { name: "--filter-by-playback-restriction-policy-arn", description: "Filters the channel list to match the specified policy", @@ -822,9 +814,9 @@ const completionSpec: Fig.Spec = { }, }, { - name: "--next-token", + name: "--filter-by-recording-configuration-arn", description: - "The first channel to retrieve. 
This is used for pagination; see the nextToken response field", + "Filters the channel list to match the specified recording-configuration ARN", args: { name: "string", }, @@ -836,6 +828,14 @@ const completionSpec: Fig.Spec = { name: "integer", }, }, + { + name: "--next-token", + description: + "The first channel to retrieve. This is used for pagination; see the nextToken response field", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: @@ -885,19 +885,19 @@ const completionSpec: Fig.Spec = { "Gets summary information about playback key pairs. For more information, see Setting Up Private Channels in the Amazon IVS User Guide", options: [ { - name: "--next-token", + name: "--max-results", description: - "The first key pair to retrieve. This is used for pagination; see the nextToken response field", + "Maximum number of key pairs to return. Default: your service quota or 100, whichever is smaller", args: { - name: "string", + name: "integer", }, }, { - name: "--max-results", + name: "--next-token", description: - "Maximum number of key pairs to return. Default: your service quota or 100, whichever is smaller", + "The first key pair to retrieve. This is used for pagination; see the nextToken response field", args: { - name: "integer", + name: "string", }, }, { @@ -949,18 +949,18 @@ const completionSpec: Fig.Spec = { "Gets summary information about playback restriction policies", options: [ { - name: "--next-token", - description: - "The first policy to retrieve. This is used for pagination; see the nextToken response field", + name: "--max-results", + description: "Maximum number of policies to return. Default: 1", args: { - name: "string", + name: "integer", }, }, { - name: "--max-results", - description: "Maximum number of policies to return. Default: 1", + name: "--next-token", + description: + "The first policy to retrieve. This is used for pagination; see the nextToken response field", args: { - name: "integer", + name: "string", }, }, { @@ -988,19 +988,19 @@ const completionSpec: Fig.Spec = { "Gets summary information about all recording configurations in your account, in the Amazon Web Services region where the API request is processed", options: [ { - name: "--next-token", + name: "--max-results", description: - "The first recording configuration to retrieve. This is used for pagination; see the nextToken response field", + "Maximum number of recording configurations to return. Default: your service quota or 100, whichever is smaller", args: { - name: "string", + name: "integer", }, }, { - name: "--max-results", + name: "--next-token", description: - "Maximum number of recording configurations to return. Default: your service quota or 100, whichever is smaller", + "The first recording configuration to retrieve. This is used for pagination; see the nextToken response field", args: { - name: "integer", + name: "string", }, }, { @@ -1059,18 +1059,18 @@ const completionSpec: Fig.Spec = { }, }, { - name: "--next-token", - description: - "The first stream key to retrieve. This is used for pagination; see the nextToken response field", + name: "--max-results", + description: "Maximum number of streamKeys to return. Default: 1", args: { - name: "string", + name: "integer", }, }, { - name: "--max-results", - description: "Maximum number of streamKeys to return. Default: 1", + name: "--next-token", + description: + "The first stream key to retrieve. 
This is used for pagination; see the nextToken response field", args: { - name: "integer", + name: "string", }, }, { @@ -1129,18 +1129,18 @@ const completionSpec: Fig.Spec = { }, }, { - name: "--next-token", - description: - "The first stream to retrieve. This is used for pagination; see the nextToken response field", + name: "--max-results", + description: "Maximum number of streams to return. Default: 100", args: { - name: "string", + name: "integer", }, }, { - name: "--max-results", - description: "Maximum number of streams to return. Default: 100", + name: "--next-token", + description: + "The first stream to retrieve. This is used for pagination; see the nextToken response field", args: { - name: "integer", + name: "string", }, }, { @@ -1176,18 +1176,18 @@ const completionSpec: Fig.Spec = { }, }, { - name: "--next-token", - description: - "The first stream to retrieve. This is used for pagination; see the nextToken response field", + name: "--max-results", + description: "Maximum number of streams to return. Default: 100", args: { - name: "string", + name: "integer", }, }, { - name: "--max-results", - description: "Maximum number of streams to return. Default: 100", + name: "--next-token", + description: + "The first stream to retrieve. This is used for pagination; see the nextToken response field", args: { - name: "integer", + name: "string", }, }, { @@ -1477,55 +1477,47 @@ const completionSpec: Fig.Spec = { }, }, { - name: "--name", - description: "Channel name", - args: { - name: "string", - }, - }, - { - name: "--latency-mode", + name: "--authorized", description: - "Channel latency mode. Use NORMAL to broadcast and deliver live video up to Full HD. Use LOW for near-real-time interaction with viewers", - args: { - name: "string", - }, + "Whether the channel is private (enabled for playback authorization)", }, { - name: "--type", + name: "--no-authorized", description: - "Channel type, which determines the allowable resolution and bitrate. If you exceed the allowable input resolution or bitrate, the stream probably will disconnect immediately. Default: STANDARD. For details, see Channel Types", - args: { - name: "string", - }, + "Whether the channel is private (enabled for playback authorization)", }, { - name: "--authorized", + name: "--insecure-ingest", description: - "Whether the channel is private (enabled for playback authorization)", + "Whether the channel allows insecure RTMP and SRT ingest. Default: false", }, { - name: "--no-authorized", + name: "--no-insecure-ingest", description: - "Whether the channel is private (enabled for playback authorization)", + "Whether the channel allows insecure RTMP and SRT ingest. Default: false", }, { - name: "--recording-configuration-arn", + name: "--latency-mode", description: - "Recording-configuration ARN. A valid ARN value here both specifies the ARN and enables recording. If this is set to an empty string, recording is disabled", + "Channel latency mode. Use NORMAL to broadcast and deliver live video up to Full HD. Use LOW for near-real-time interaction with viewers", args: { name: "string", }, }, { - name: "--insecure-ingest", - description: - "Whether the channel allows insecure RTMP and SRT ingest. Default: false", + name: "--name", + description: "Channel name", + args: { + name: "string", + }, }, { - name: "--no-insecure-ingest", + name: "--playback-restriction-policy-arn", description: - "Whether the channel allows insecure RTMP and SRT ingest. Default: false", + "Playback-restriction-policy ARN. 
A valid ARN value here both specifies the ARN and enables playback restriction. If this is set to an empty string, playback restriction policy is disabled", + args: { + name: "string", + }, }, { name: "--preset", @@ -1536,9 +1528,17 @@ const completionSpec: Fig.Spec = { }, }, { - name: "--playback-restriction-policy-arn", + name: "--recording-configuration-arn", description: - "Playback-restriction-policy ARN. A valid ARN value here both specifies the ARN and enables playback restriction. If this is set to an empty string, playback restriction policy is disabled", + "Recording-configuration ARN. A valid ARN value here both specifies the ARN and enables recording. If this is set to an empty string, recording is disabled", + args: { + name: "string", + }, + }, + { + name: "--type", + description: + "Channel type, which determines the allowable resolution and bitrate. If you exceed the allowable input resolution or bitrate, the stream probably will disconnect immediately. Default: STANDARD. For details, see Channel Types", args: { name: "string", }, @@ -1566,13 +1566,6 @@ const completionSpec: Fig.Spec = { name: "update-playback-restriction-policy", description: "Updates a specified playback restriction policy", options: [ - { - name: "--arn", - description: "ARN of the playback-restriction-policy to be updated", - args: { - name: "string", - }, - }, { name: "--allowed-countries", description: @@ -1589,6 +1582,13 @@ const completionSpec: Fig.Spec = { name: "list", }, }, + { + name: "--arn", + description: "ARN of the playback-restriction-policy to be updated", + args: { + name: "string", + }, + }, { name: "--enable-strict-origin-enforcement", description: diff --git a/src/aws/m2.ts b/src/aws/m2.ts index 2b79d43fe169..6325232594ce 100644 --- a/src/aws/m2.ts +++ b/src/aws/m2.ts @@ -14,6 +14,14 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--auth-secrets-manager-arn", + description: + "The Amazon Web Services Secrets Manager containing user's credentials for authentication and authorization for Cancel Batch Job Execution operation", + args: { + name: "string", + }, + }, { name: "--execution-id", description: "The unique identifier of the batch job execution", @@ -48,7 +56,7 @@ const completionSpec: Fig.Spec = { { name: "--client-token", description: - "Unique, case-sensitive identifier the service generates to ensure the idempotency of the request to create an application. The service generates the clientToken when the API call is triggered. The token expires after one hour, so if you retry the API within this timeframe with the same clientToken, you will get the same response. The service also handles deleting the clientToken after it expires", + "A client token is a unique, case-sensitive string of up to 128 ASCII characters with ASCII values of 33-126 inclusive. It's generated by the client to ensure idempotent operations, allowing for safe retries without unintended side effects", args: { name: "string", }, @@ -1068,7 +1076,7 @@ const completionSpec: Fig.Spec = { { name: "list-batch-job-restart-points", description: - "Lists all the job steps for JCL files to restart a batch job. This is only applicable for Micro Focus engine with versions 8.0.6 and above", + "Lists all the job steps for a JCL file to restart a batch job. 
This is only applicable for Micro Focus engine with versions 8.0.6 and above", options: [ { name: "--application-id", @@ -1077,9 +1085,17 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--auth-secrets-manager-arn", + description: + "The Amazon Web Services Secrets Manager containing user's credentials for authentication and authorization for List Batch Job Restart Points operation", + args: { + name: "string", + }, + }, { name: "--execution-id", - description: "The unique identifier of each batch job execution", + description: "The unique identifier of the batch job execution", args: { name: "string", }, @@ -1549,6 +1565,14 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--auth-secrets-manager-arn", + description: + "The Amazon Web Services Secrets Manager containing user's credentials for authentication and authorization for Start Batch Job execution operation", + args: { + name: "string", + }, + }, { name: "--batch-job-identifier", description: "The unique identifier of the batch job", diff --git a/src/aws/mailmanager.ts b/src/aws/mailmanager.ts index 117193cf89c0..e2787ba71941 100644 --- a/src/aws/mailmanager.ts +++ b/src/aws/mailmanager.ts @@ -1586,6 +1586,16 @@ const completionSpec: Fig.Spec = { name: "timestamp", }, }, + { + name: "--include-metadata", + description: + "Whether to include message metadata as JSON files in the export", + }, + { + name: "--no-include-metadata", + description: + "Whether to include message metadata as JSON files in the export", + }, { name: "--max-results", description: diff --git a/src/aws/marketplace-reporting.ts b/src/aws/marketplace-reporting.ts index c2590ed355d7..d7f51e7d52c1 100644 --- a/src/aws/marketplace-reporting.ts +++ b/src/aws/marketplace-reporting.ts @@ -44,5 +44,4 @@ const completionSpec: Fig.Spec = { }, ], }; - export default completionSpec; diff --git a/src/aws/neptune-graph.ts b/src/aws/neptune-graph.ts index 71312a6d996c..f3ed0bcc3f67 100644 --- a/src/aws/neptune-graph.ts +++ b/src/aws/neptune-graph.ts @@ -317,7 +317,7 @@ const completionSpec: Fig.Spec = { { name: "--format", description: - "Specifies the format of S3 data to be imported. Valid values are CSV, which identifies the Gremlin CSV format or OPENCYPHER, which identies the openCypher load format", + "Specifies the format of S3 data to be imported. Valid values are CSV, which identifies the Gremlin CSV format, OPEN_CYPHER, which identifies the openCypher load format, or ntriples, which identifies the RDF n-triples format", args: { name: "string", }, diff --git a/src/aws/payment-cryptography-data.ts b/src/aws/payment-cryptography-data.ts index 285ea9fd0c0e..23c3d92db1a6 100644 --- a/src/aws/payment-cryptography-data.ts +++ b/src/aws/payment-cryptography-data.ts @@ -6,7 +6,7 @@ const completionSpec: Fig.Spec = { { name: "decrypt-data", description: - "Decrypts ciphertext data to plaintext using a symmetric (TDES, AES), asymmetric (RSA), or derived (DUKPT or EMV) encryption key scheme. For more information, see Decrypt data in the Amazon Web Services Payment Cryptography User Guide. You can use an encryption key generated within Amazon Web Services Payment Cryptography, or you can import your own encryption key by calling ImportKey. For this operation, the key must have KeyModesOfUse set to Decrypt. In asymmetric decryption, Amazon Web Services Payment Cryptography decrypts the ciphertext using the private component of the asymmetric encryption key pair. 
For data encryption outside of Amazon Web Services Payment Cryptography, you can export the public component of the asymmetric key pair by calling GetPublicCertificate. For symmetric and DUKPT decryption, Amazon Web Services Payment Cryptography supports TDES and AES algorithms. For EMV decryption, Amazon Web Services Payment Cryptography supports TDES algorithms. For asymmetric decryption, Amazon Web Services Payment Cryptography supports RSA. When you use TDES or TDES DUKPT, the ciphertext data length must be a multiple of 8 bytes. For AES or AES DUKPT, the ciphertext data length must be a multiple of 16 bytes. For RSA, it sould be equal to the key size unless padding is enabled. For information about valid keys for this operation, see Understanding key attributes and Key types for specific data operations in the Amazon Web Services Payment Cryptography User Guide. Cross-account use: This operation can't be used across different Amazon Web Services accounts. Related operations: EncryptData GetPublicCertificate ImportKey", + "Decrypts ciphertext data to plaintext using a symmetric (TDES, AES), asymmetric (RSA), or derived (DUKPT or EMV) encryption key scheme. For more information, see Decrypt data in the Amazon Web Services Payment Cryptography User Guide. You can use an decryption key generated within Amazon Web Services Payment Cryptography, or you can import your own decryption key by calling ImportKey. For this operation, the key must have KeyModesOfUse set to Decrypt. In asymmetric decryption, Amazon Web Services Payment Cryptography decrypts the ciphertext using the private component of the asymmetric encryption key pair. For data encryption outside of Amazon Web Services Payment Cryptography, you can export the public component of the asymmetric key pair by calling GetPublicCertificate. This operation also supports dynamic keys, allowing you to pass a dynamic decryption key as a TR-31 WrappedKeyBlock. This can be used when key material is frequently rotated, such as during every card transaction, and there is need to avoid importing short-lived keys into Amazon Web Services Payment Cryptography. To decrypt using dynamic keys, the keyARN is the Key Encryption Key (KEK) of the TR-31 wrapped decryption key material. The incoming wrapped key shall have a key purpose of D0 with a mode of use of B or D. For more information, see Using Dynamic Keys in the Amazon Web Services Payment Cryptography User Guide. For symmetric and DUKPT decryption, Amazon Web Services Payment Cryptography supports TDES and AES algorithms. For EMV decryption, Amazon Web Services Payment Cryptography supports TDES algorithms. For asymmetric decryption, Amazon Web Services Payment Cryptography supports RSA. When you use TDES or TDES DUKPT, the ciphertext data length must be a multiple of 8 bytes. For AES or AES DUKPT, the ciphertext data length must be a multiple of 16 bytes. For RSA, it sould be equal to the key size unless padding is enabled. For information about valid keys for this operation, see Understanding key attributes and Key types for specific data operations in the Amazon Web Services Payment Cryptography User Guide. Cross-account use: This operation can't be used across different Amazon Web Services accounts. 
Related operations: EncryptData GetPublicCertificate ImportKey", options: [ { name: "--key-identifier", @@ -61,7 +61,7 @@ const completionSpec: Fig.Spec = { { name: "encrypt-data", description: - "Encrypts plaintext data to ciphertext using a symmetric (TDES, AES), asymmetric (RSA), or derived (DUKPT or EMV) encryption key scheme. For more information, see Encrypt data in the Amazon Web Services Payment Cryptography User Guide. You can generate an encryption key within Amazon Web Services Payment Cryptography by calling CreateKey. You can import your own encryption key by calling ImportKey. For this operation, the key must have KeyModesOfUse set to Encrypt. In asymmetric encryption, plaintext is encrypted using public component. You can import the public component of an asymmetric key pair created outside Amazon Web Services Payment Cryptography by calling ImportKey. For symmetric and DUKPT encryption, Amazon Web Services Payment Cryptography supports TDES and AES algorithms. For EMV encryption, Amazon Web Services Payment Cryptography supports TDES algorithms.For asymmetric encryption, Amazon Web Services Payment Cryptography supports RSA. When you use TDES or TDES DUKPT, the plaintext data length must be a multiple of 8 bytes. For AES or AES DUKPT, the plaintext data length must be a multiple of 16 bytes. For RSA, it sould be equal to the key size unless padding is enabled. To encrypt using DUKPT, you must already have a BDK (Base Derivation Key) key in your account with KeyModesOfUse set to DeriveKey, or you can generate a new DUKPT key by calling CreateKey. To encrypt using EMV, you must already have an IMK (Issuer Master Key) key in your account with KeyModesOfUse set to DeriveKey. For information about valid keys for this operation, see Understanding key attributes and Key types for specific data operations in the Amazon Web Services Payment Cryptography User Guide. Cross-account use: This operation can't be used across different Amazon Web Services accounts. Related operations: DecryptData GetPublicCertificate ImportKey ReEncryptData", + "Encrypts plaintext data to ciphertext using a symmetric (TDES, AES), asymmetric (RSA), or derived (DUKPT or EMV) encryption key scheme. For more information, see Encrypt data in the Amazon Web Services Payment Cryptography User Guide. You can generate an encryption key within Amazon Web Services Payment Cryptography by calling CreateKey. You can import your own encryption key by calling ImportKey. For this operation, the key must have KeyModesOfUse set to Encrypt. In asymmetric encryption, plaintext is encrypted using public component. You can import the public component of an asymmetric key pair created outside Amazon Web Services Payment Cryptography by calling ImportKey. This operation also supports dynamic keys, allowing you to pass a dynamic encryption key as a TR-31 WrappedKeyBlock. This can be used when key material is frequently rotated, such as during every card transaction, and there is need to avoid importing short-lived keys into Amazon Web Services Payment Cryptography. To encrypt using dynamic keys, the keyARN is the Key Encryption Key (KEK) of the TR-31 wrapped encryption key material. The incoming wrapped key shall have a key purpose of D0 with a mode of use of B or D. For more information, see Using Dynamic Keys in the Amazon Web Services Payment Cryptography User Guide. For symmetric and DUKPT encryption, Amazon Web Services Payment Cryptography supports TDES and AES algorithms. 
For EMV encryption, Amazon Web Services Payment Cryptography supports TDES algorithms.For asymmetric encryption, Amazon Web Services Payment Cryptography supports RSA. When you use TDES or TDES DUKPT, the plaintext data length must be a multiple of 8 bytes. For AES or AES DUKPT, the plaintext data length must be a multiple of 16 bytes. For RSA, it sould be equal to the key size unless padding is enabled. To encrypt using DUKPT, you must already have a BDK (Base Derivation Key) key in your account with KeyModesOfUse set to DeriveKey, or you can generate a new DUKPT key by calling CreateKey. To encrypt using EMV, you must already have an IMK (Issuer Master Key) key in your account with KeyModesOfUse set to DeriveKey. For information about valid keys for this operation, see Understanding key attributes and Key types for specific data operations in the Amazon Web Services Payment Cryptography User Guide. Cross-account use: This operation can't be used across different Amazon Web Services accounts. Related operations: DecryptData GetPublicCertificate ImportKey ReEncryptData", options: [ { name: "--key-identifier", @@ -224,6 +224,86 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "generate-mac-emv-pin-change", + description: + "Generates an issuer script mac for EMV payment cards that use offline PINs as the cardholder verification method (CVM). This operation generates an authenticated issuer script response by appending the incoming message data (APDU command) with the target encrypted PIN block in ISO2 format. The command structure and method to send the issuer script update to the card is not defined by this operation and is typically determined by the applicable payment card scheme. The primary inputs to this operation include the incoming new encrypted pinblock, PIN encryption key (PEK), issuer master key (IMK), primary account number (PAN), and the payment card derivation method. The operation uses two issuer master keys - secure messaging for confidentiality (IMK-SMC) and secure messaging for integrity (IMK-SMI). The SMC key is used to internally derive a key to secure the pin, while SMI key is used to internally derive a key to authenticate the script reponse as per the EMV 4.4 - Book 2 - Security and Key Management specification. This operation supports Amex, EMV2000, EMVCommon, Mastercard and Visa derivation methods, each requiring specific input parameters. Users must follow the specific derivation method and input parameters defined by the respective payment card scheme. Use GenerateMac operation when sending a script update to an EMV card that does not involve PIN change. When assigning IAM permissions, it is important to understand that EncryptData using EMV keys and GenerateMac perform similar functions to this command. Cross-account use: This operation can't be used across different Amazon Web Services accounts. 
Related operations: EncryptData GenerateMac", + options: [ + { + name: "--new-pin-pek-identifier", + description: + "The keyARN of the PEK protecting the incoming new encrypted PIN block", + args: { + name: "string", + }, + }, + { + name: "--new-encrypted-pin-block", + description: + "The incoming new encrypted PIN block data for offline pin change on an EMV card", + args: { + name: "string", + }, + }, + { + name: "--pin-block-format", + description: + "The PIN encoding format of the incoming new encrypted PIN block as specified in ISO 9564", + args: { + name: "string", + }, + }, + { + name: "--secure-messaging-integrity-key-identifier", + description: + "The keyARN of the issuer master key (IMK-SMI) used to authenticate the issuer script response", + args: { + name: "string", + }, + }, + { + name: "--secure-messaging-confidentiality-key-identifier", + description: + "The keyARN of the issuer master key (IMK-SMC) used to protect the PIN block data in the issuer script response", + args: { + name: "string", + }, + }, + { + name: "--message-data", + description: + "The message data is the APDU command from the card reader or terminal. The target encrypted PIN block, after translation to ISO2 format, is appended to this message data to generate an issuer script response", + args: { + name: "string", + }, + }, + { + name: "--derivation-method-attributes", + description: + "The attributes and data values to derive payment card specific confidentiality and integrity keys", + args: { + name: "structure", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "generate-pin-data", description: @@ -298,7 +378,7 @@ const completionSpec: Fig.Spec = { { name: "re-encrypt-data", description: - "Re-encrypt ciphertext using DUKPT or Symmetric data encryption keys. You can either generate an encryption key within Amazon Web Services Payment Cryptography by calling CreateKey or import your own encryption key by calling ImportKey. The KeyArn for use with this operation must be in a compatible key state with KeyModesOfUse set to Encrypt. For symmetric and DUKPT encryption, Amazon Web Services Payment Cryptography supports TDES and AES algorithms. To encrypt using DUKPT, a DUKPT key must already exist within your account with KeyModesOfUse set to DeriveKey or a new DUKPT can be generated by calling CreateKey. For information about valid keys for this operation, see Understanding key attributes and Key types for specific data operations in the Amazon Web Services Payment Cryptography User Guide. Cross-account use: This operation can't be used across different Amazon Web Services accounts. 
Related operations: DecryptData EncryptData GetPublicCertificate ImportKey", + "Re-encrypt ciphertext using DUKPT or Symmetric data encryption keys. You can either generate an encryption key within Amazon Web Services Payment Cryptography by calling CreateKey or import your own encryption key by calling ImportKey. The KeyArn for use with this operation must be in a compatible key state with KeyModesOfUse set to Encrypt. This operation also supports dynamic keys, allowing you to pass a dynamic encryption key as a TR-31 WrappedKeyBlock. This can be used when key material is frequently rotated, such as during every card transaction, and there is need to avoid importing short-lived keys into Amazon Web Services Payment Cryptography. To re-encrypt using dynamic keys, the keyARN is the Key Encryption Key (KEK) of the TR-31 wrapped encryption key material. The incoming wrapped key shall have a key purpose of D0 with a mode of use of B or D. For more information, see Using Dynamic Keys in the Amazon Web Services Payment Cryptography User Guide. For symmetric and DUKPT encryption, Amazon Web Services Payment Cryptography supports TDES and AES algorithms. To encrypt using DUKPT, a DUKPT key must already exist within your account with KeyModesOfUse set to DeriveKey or a new DUKPT can be generated by calling CreateKey. For information about valid keys for this operation, see Understanding key attributes and Key types for specific data operations in the Amazon Web Services Payment Cryptography User Guide. Cross-account use: This operation can't be used across different Amazon Web Services accounts. Related operations: DecryptData EncryptData GetPublicCertificate ImportKey", options: [ { name: "--incoming-key-identifier", @@ -377,7 +457,7 @@ const completionSpec: Fig.Spec = { { name: "translate-pin-data", description: - "Translates encrypted PIN block from and to ISO 9564 formats 0,1,3,4. For more information, see Translate PIN data in the Amazon Web Services Payment Cryptography User Guide. PIN block translation involves changing the encrytion of PIN block from one encryption key to another encryption key and changing PIN block format from one to another without PIN block data leaving Amazon Web Services Payment Cryptography. The encryption key transformation can be from PEK (Pin Encryption Key) to BDK (Base Derivation Key) for DUKPT or from BDK for DUKPT to PEK. Amazon Web Services Payment Cryptography supports TDES and AES key derivation type for DUKPT translations. The allowed combinations of PIN block format translations are guided by PCI. It is important to note that not all encrypted PIN block formats (example, format 1) require PAN (Primary Account Number) as input. And as such, PIN block format that requires PAN (example, formats 0,3,4) cannot be translated to a format (format 1) that does not require a PAN for generation. For information about valid keys for this operation, see Understanding key attributes and Key types for specific data operations in the Amazon Web Services Payment Cryptography User Guide. Amazon Web Services Payment Cryptography currently supports ISO PIN block 4 translation for PIN block built using legacy PAN length. That is, PAN is the right most 12 digits excluding the check digits. Cross-account use: This operation can't be used across different Amazon Web Services accounts. Related operations: GeneratePinData VerifyPinData", + "Translates encrypted PIN block from and to ISO 9564 formats 0,1,3,4. 
For more information, see Translate PIN data in the Amazon Web Services Payment Cryptography User Guide. PIN block translation involves changing the encrytion of PIN block from one encryption key to another encryption key and changing PIN block format from one to another without PIN block data leaving Amazon Web Services Payment Cryptography. The encryption key transformation can be from PEK (Pin Encryption Key) to BDK (Base Derivation Key) for DUKPT or from BDK for DUKPT to PEK. Amazon Web Services Payment Cryptography supports TDES and AES key derivation type for DUKPT translations. This operation also supports dynamic keys, allowing you to pass a dynamic PEK as a TR-31 WrappedKeyBlock. This can be used when key material is frequently rotated, such as during every card transaction, and there is need to avoid importing short-lived keys into Amazon Web Services Payment Cryptography. To translate PIN block using dynamic keys, the keyARN is the Key Encryption Key (KEK) of the TR-31 wrapped PEK. The incoming wrapped key shall have a key purpose of P0 with a mode of use of B or D. For more information, see Using Dynamic Keys in the Amazon Web Services Payment Cryptography User Guide. The allowed combinations of PIN block format translations are guided by PCI. It is important to note that not all encrypted PIN block formats (example, format 1) require PAN (Primary Account Number) as input. And as such, PIN block format that requires PAN (example, formats 0,3,4) cannot be translated to a format (format 1) that does not require a PAN for generation. For information about valid keys for this operation, see Understanding key attributes and Key types for specific data operations in the Amazon Web Services Payment Cryptography User Guide. Amazon Web Services Payment Cryptography currently supports ISO PIN block 4 translation for PIN block built using legacy PAN length. That is, PAN is the right most 12 digits excluding the check digits. Cross-account use: This operation can't be used across different Amazon Web Services accounts. Related operations: GeneratePinData VerifyPinData", options: [ { name: "--incoming-key-identifier", diff --git a/src/aws/pinpoint-sms-voice-v2.ts b/src/aws/pinpoint-sms-voice-v2.ts index 81ee3d14ca38..94d81fcf795f 100644 --- a/src/aws/pinpoint-sms-voice-v2.ts +++ b/src/aws/pinpoint-sms-voice-v2.ts @@ -484,12 +484,12 @@ const completionSpec: Fig.Spec = { { name: "create-registration-attachment", description: - "Create a new registration attachment to use for uploading a file or a URL to a file. The maximum file size is 1MiB and valid file extensions are PDF, JPEG and PNG. For example, many sender ID registrations require a signed \u201cletter of authorization\u201d (LOA) to be submitted", + "Create a new registration attachment to use for uploading a file or a URL to a file. The maximum file size is 500KB and valid file extensions are PDF, JPEG and PNG. For example, many sender ID registrations require a signed \u201cletter of authorization\u201d (LOA) to be submitted. Use either AttachmentUrl or AttachmentBody to upload your attachment. If both are specified then an exception is returned", options: [ { name: "--attachment-body", description: - "The registration file to upload. The maximum file size is 1MiB and valid file extensions are PDF, JPEG and PNG", + "The registration file to upload. 
The maximum file size is 500KB and valid file extensions are PDF, JPEG and PNG", args: { name: "blob", }, @@ -497,7 +497,7 @@ const completionSpec: Fig.Spec = { { name: "--attachment-url", description: - "A URL to the required registration file. For example, you can provide the S3 object URL", + "Registration files have to be stored in an Amazon S3 bucket. The URI to use when sending is in the format s3://BucketName/FileName", args: { name: "string", }, @@ -1558,7 +1558,7 @@ const completionSpec: Fig.Spec = { { name: "describe-opted-out-numbers", description: - "Describes the specified opted out destination numbers or all opted out destination numbers in an opt-out list. If you specify opted out numbers, the output includes information for only the specified opted out numbers. If you specify filters, the output includes information for only those opted out numbers that meet the filter criteria. If you don't specify opted out numbers or filters, the output includes information for all opted out destination numbers in your opt-out list. If you specify an opted out number that isn't valid, an error is returned", + "Describes the specified opted out destination numbers or all opted out destination numbers in an opt-out list. If you specify opted out numbers, the output includes information for only the specified opted out numbers. If you specify filters, the output includes information for only those opted out numbers that meet the filter criteria. If you don't specify opted out numbers or filters, the output includes information for all opted out destination numbers in your opt-out list. If you specify an opted out number that isn't valid, an exception is returned", options: [ { name: "--opt-out-list-name", @@ -1571,7 +1571,7 @@ const completionSpec: Fig.Spec = { { name: "--opted-out-numbers", description: - "An array of phone numbers to search for in the OptOutList", + "An array of phone numbers to search for in the OptOutList. If you specify an opted out number that isn't valid, an exception is returned", args: { name: "list", }, diff --git a/src/aws/qbusiness.ts b/src/aws/qbusiness.ts index 59530a44e795..f50eb90bcf7d 100644 --- a/src/aws/qbusiness.ts +++ b/src/aws/qbusiness.ts @@ -142,7 +142,7 @@ const completionSpec: Fig.Spec = { { name: "--user-groups", description: - "The groups that a user associated with the chat input belongs to", + "The group names that a user associated with the chat input belongs to", args: { name: "list", }, @@ -258,7 +258,7 @@ const completionSpec: Fig.Spec = { { name: "--role-arn", description: - "The Amazon Resource Name (ARN) of an IAM role with permissions to access your Amazon CloudWatch logs and metrics", + "The Amazon Resource Name (ARN) of an IAM role with permissions to access your Amazon CloudWatch logs and metrics. If this property is not specified, Amazon Q Business will create a service linked role (SLR) and use it as the application's role", args: { name: "string", }, @@ -811,6 +811,14 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--origins", + description: + "Sets the website domain origins that are allowed to embed the Amazon Q Business web experience. The domain origin refers to the base URL for accessing a website including the protocol (http/https), the domain name, and the port number (if specified).

You must only submit a base URL and not a full path. For example, https://docs.aws.amazon.com.

", + args: { + name: "list", + }, + }, { name: "--role-arn", description: @@ -3488,6 +3496,14 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--origins", + description: + "Updates the website domain origins that are allowed to embed the Amazon Q Business web experience. The domain origin refers to the base URL for accessing a website including the protocol (http/https), the domain name, and the port number (if specified).

  • Any values except null submitted as part of this update will replace all previous values.

  • You must only submit a base URL and not a full path. For example, https://docs.aws.amazon.com.

", + args: { + name: "list", + }, + }, { name: "--cli-input-json", description: diff --git a/src/aws/quicksight.ts b/src/aws/quicksight.ts index 71ad5a0d2a36..2148af8f3324 100644 --- a/src/aws/quicksight.ts +++ b/src/aws/quicksight.ts @@ -7698,6 +7698,16 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--restore-to-folders", + description: + "A boolean value that determines if the analysis will be restored to folders that it previously resided in. A True value restores analysis back to all folders that it previously resided in. A False value restores the analysis but does not restore the analysis back to all previously resided folders. Restoring a restricted analysis requires this parameter to be set to True", + }, + { + name: "--no-restore-to-folders", + description: + "A boolean value that determines if the analysis will be restored to folders that it previously resided in. A True value restores analysis back to all folders that it previously resided in. A False value restores the analysis but does not restore the analysis back to all previously resided folders. Restoring a restricted analysis requires this parameter to be set to True", + }, { name: "--cli-input-json", description: @@ -8477,6 +8487,54 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "start-dashboard-snapshot-job-schedule", + description: + "Starts an asynchronous job that runs an existing dashboard schedule and sends the dashboard snapshot through email. Only one job can run simultaneously in a given schedule. Repeated requests are skipped with a 202 HTTP status code. For more information, see Scheduling and sending Amazon QuickSight reports by email and Configuring email report settings for a Amazon QuickSight dashboard in the Amazon QuickSight User Guide", + options: [ + { + name: "--aws-account-id", + description: + "The ID of the Amazon Web Services account that the dashboard snapshot job is executed in", + args: { + name: "string", + }, + }, + { + name: "--dashboard-id", + description: + "The ID of the dashboard that you want to start a snapshot job schedule for", + args: { + name: "string", + }, + }, + { + name: "--schedule-id", + description: + "The ID of the schedule that you want to start a snapshot job schedule for. The schedule ID can be found in the Amazon QuickSight console in the Schedules pane of the dashboard that the schedule is configured for", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "tag-resource", description: diff --git a/src/aws/rds.ts b/src/aws/rds.ts index 517fd1c224a4..22b9a1f6b7e0 100644 --- a/src/aws/rds.ts +++ b/src/aws/rds.ts @@ -1115,12 +1115,12 @@ const completionSpec: Fig.Spec = { { name: "--enable-iam-database-authentication", description: - "Specifies whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled. For more information, see IAM Database Authentication in the Amazon Aurora User Guide. Valid for Cluster Type: Aurora DB clusters only", + "Specifies whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled. For more information, see IAM Database Authentication in the Amazon Aurora User Guide or IAM database authentication for MariaDB, MySQL, and PostgreSQL in the Amazon RDS User Guide. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters", }, { name: "--no-enable-iam-database-authentication", description: - "Specifies whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled. For more information, see IAM Database Authentication in the Amazon Aurora User Guide. Valid for Cluster Type: Aurora DB clusters only", + "Specifies whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled. For more information, see IAM Database Authentication in the Amazon Aurora User Guide or IAM database authentication for MariaDB, MySQL, and PostgreSQL in the Amazon RDS User Guide. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters", }, { name: "--backtrack-window", @@ -3934,7 +3934,7 @@ const completionSpec: Fig.Spec = { options: [ { name: "--db-shard-group-identifier", - description: "Teh name of the DB shard group to delete", + description: "The name of the DB shard group to delete", args: { name: "string", }, @@ -6214,7 +6214,7 @@ const completionSpec: Fig.Spec = { { name: "--db-shard-group-identifier", description: - "The user-supplied DB shard group identifier or the Amazon Resource Name (ARN) of the DB shard group. If this parameter is specified, information for only the specific DB shard group is returned. This parameter isn't case-sensitive. Constraints: If supplied, must match an existing DB shard group identifier", + "The user-supplied DB shard group identifier. If this parameter is specified, information for only the specific DB shard group is returned. This parameter isn't case-sensitive. Constraints: If supplied, must match an existing DB shard group identifier", args: { name: "string", }, @@ -8654,12 +8654,12 @@ const completionSpec: Fig.Spec = { { name: "--enable-iam-database-authentication", description: - "Specifies whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled. For more information, see IAM Database Authentication in the Amazon Aurora User Guide. Valid for Cluster Type: Aurora DB clusters only", + "Specifies whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. 
By default, mapping isn't enabled. For more information, see IAM Database Authentication in the Amazon Aurora User Guide or IAM database authentication for MariaDB, MySQL, and PostgreSQL in the Amazon RDS User Guide. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters", }, { name: "--no-enable-iam-database-authentication", description: - "Specifies whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled. For more information, see IAM Database Authentication in the Amazon Aurora User Guide. Valid for Cluster Type: Aurora DB clusters only", + "Specifies whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled. For more information, see IAM Database Authentication in the Amazon Aurora User Guide or IAM database authentication for MariaDB, MySQL, and PostgreSQL in the Amazon RDS User Guide. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters", }, { name: "--backtrack-window", @@ -8828,12 +8828,12 @@ const completionSpec: Fig.Spec = { { name: "--enable-performance-insights", description: - "Specifies whether to turn on Performance Insights for the DB cluster. For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide. Valid for Cluster Type: Multi-AZ DB clusters only", + "Specifies whether to turn on Performance Insights for the DB cluster. For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters", }, { name: "--no-enable-performance-insights", description: - "Specifies whether to turn on Performance Insights for the DB cluster. For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide. Valid for Cluster Type: Multi-AZ DB clusters only", + "Specifies whether to turn on Performance Insights for the DB cluster. For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters", }, { name: "--performance-insights-kms-key-id", @@ -11419,12 +11419,12 @@ const completionSpec: Fig.Spec = { { name: "--enable-iam-database-authentication", description: - "Specifies whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled. For more information, see IAM Database Authentication in the Amazon Aurora User Guide. Valid for: Aurora DB clusters only", + "Specifies whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled. For more information, see IAM Database Authentication in the Amazon Aurora User Guide or IAM database authentication for MariaDB, MySQL, and PostgreSQL in the Amazon RDS User Guide. Valid for: Aurora DB clusters and Multi-AZ DB clusters", }, { name: "--no-enable-iam-database-authentication", description: - "Specifies whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled. For more information, see IAM Database Authentication in the Amazon Aurora User Guide. Valid for: Aurora DB clusters only", + "Specifies whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. 
By default, mapping isn't enabled. For more information, see IAM Database Authentication in the Amazon Aurora User Guide or IAM database authentication for MariaDB, MySQL, and PostgreSQL in the Amazon RDS User Guide. Valid for: Aurora DB clusters and Multi-AZ DB clusters", }, { name: "--backtrack-window", @@ -11684,12 +11684,12 @@ const completionSpec: Fig.Spec = { { name: "--enable-iam-database-authentication", description: - "Specifies whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled. For more information, see IAM Database Authentication in the Amazon Aurora User Guide. Valid for: Aurora DB clusters only", + "Specifies whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled. For more information, see IAM Database Authentication in the Amazon Aurora User Guide or IAM database authentication for MariaDB, MySQL, and PostgreSQL in the Amazon RDS User Guide. Valid for: Aurora DB clusters and Multi-AZ DB clusters", }, { name: "--no-enable-iam-database-authentication", description: - "Specifies whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled. For more information, see IAM Database Authentication in the Amazon Aurora User Guide. Valid for: Aurora DB clusters only", + "Specifies whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled. For more information, see IAM Database Authentication in the Amazon Aurora User Guide or IAM database authentication for MariaDB, MySQL, and PostgreSQL in the Amazon RDS User Guide. Valid for: Aurora DB clusters and Multi-AZ DB clusters", }, { name: "--backtrack-window", @@ -13594,7 +13594,7 @@ const completionSpec: Fig.Spec = { { name: "--blue-green-deployment-identifier", description: - "The unique identifier of the blue/green deployment. Constraints: Must match an existing blue/green deployment identifier", + "The resource ID of the blue/green deployment. Constraints: Must match an existing blue/green deployment resource ID", args: { name: "string", }, diff --git a/src/aws/redshift.ts b/src/aws/redshift.ts index 4b65a7901b5d..5c498e4de771 100644 --- a/src/aws/redshift.ts +++ b/src/aws/redshift.ts @@ -612,7 +612,7 @@ const completionSpec: Fig.Spec = { { name: "--node-type", description: - "The node type to be provisioned for the cluster. For information about node types, go to Working with Clusters in the Amazon Redshift Cluster Management Guide. Valid Values: dc2.large | dc2.8xlarge | ra3.xlplus | ra3.4xlarge | ra3.16xlarge", + "The node type to be provisioned for the cluster. For information about node types, go to Working with Clusters in the Amazon Redshift Cluster Management Guide. 
Valid Values: dc2.large | dc2.8xlarge | ra3.large | ra3.xlplus | ra3.4xlarge | ra3.16xlarge", args: { name: "string", }, @@ -1450,6 +1450,82 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "create-integration", + description: "Creates a zero-ETL integration with Amazon Redshift", + options: [ + { + name: "--source-arn", + description: + "The Amazon Resource Name (ARN) of the database to use as the source for replication", + args: { + name: "string", + }, + }, + { + name: "--target-arn", + description: + "The Amazon Resource Name (ARN) of the Amazon Redshift data warehouse to use as the target for replication", + args: { + name: "string", + }, + }, + { + name: "--integration-name", + description: "The name of the integration", + args: { + name: "string", + }, + }, + { + name: "--kms-key-id", + description: + "An Key Management Service (KMS) key identifier for the key to use to encrypt the integration. If you don't specify an encryption key, the default Amazon Web Services owned key is used", + args: { + name: "string", + }, + }, + { + name: "--tag-list", + description: "A list of tags", + args: { + name: "list", + }, + }, + { + name: "--additional-encryption-context", + description: + "An optional set of non-secret key\u2013value pairs that contains additional contextual information about the data. For more information, see Encryption context in the Amazon Web Services Key Management Service Developer Guide. You can only include this parameter if you specify the KMSKeyId parameter", + args: { + name: "map", + }, + }, + { + name: "--description", + description: "A description of the integration", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "create-redshift-idc-application", description: @@ -2276,6 +2352,36 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "delete-integration", + description: "Deletes a zero-ETL integration with Amazon Redshift", + options: [ + { + name: "--integration-arn", + description: "The unique identifier of the integration to delete", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "delete-partner", description: @@ -4461,6 +4567,85 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "describe-integrations", + description: + "Describes one or more zero-ETL integrations with Amazon Redshift", + options: [ + { + name: "--integration-arn", + description: "The unique identifier of the integration", + args: { + name: "string", + }, + }, + { + name: "--max-records", + description: + "The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value. Default: 100 Constraints: minimum 20, maximum 100", + args: { + name: "integer", + }, + }, + { + name: "--marker", + description: + "An optional pagination token provided by a previous DescribeIntegrations request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords", + args: { + name: "string", + }, + }, + { + name: "--filters", + description: + "A filter that specifies one or more resources to return", + args: { + name: "list", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. 
Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "describe-logging-status", description: @@ -5474,7 +5659,7 @@ const completionSpec: Fig.Spec = { { name: "--resource-type", description: - "The type of resource with which you want to view tags. Valid resource types are: Cluster CIDR/IP EC2 security group Snapshot Cluster security group Subnet group HSM connection HSM certificate Parameter group Snapshot copy grant For more information about Amazon Redshift resource types and constructing ARNs, go to Specifying Policy Elements: Actions, Effects, Resources, and Principals in the Amazon Redshift Cluster Management Guide", + "The type of resource with which you want to view tags. Valid resource types are: Cluster CIDR/IP EC2 security group Snapshot Cluster security group Subnet group HSM connection HSM certificate Parameter group Snapshot copy grant Integration (zero-ETL integration) To describe the tags associated with an integration, don't specify ResourceType, instead specify the ResourceName of the integration. For more information about Amazon Redshift resource types and constructing ARNs, go to Specifying Policy Elements: Actions, Effects, Resources, and Principals in the Amazon Redshift Cluster Management Guide", args: { name: "string", }, @@ -6448,7 +6633,7 @@ const completionSpec: Fig.Spec = { { name: "--node-type", description: - "The new node type of the cluster. If you specify a new node type, you must also specify the number of nodes parameter. For more information about resizing clusters, go to Resizing Clusters in Amazon Redshift in the Amazon Redshift Cluster Management Guide. Valid Values: dc2.large | dc2.8xlarge | ra3.xlplus | ra3.4xlarge | ra3.16xlarge", + "The new node type of the cluster. If you specify a new node type, you must also specify the number of nodes parameter. For more information about resizing clusters, go to Resizing Clusters in Amazon Redshift in the Amazon Redshift Cluster Management Guide. Valid Values: dc2.large | dc2.8xlarge | ra3.large | ra3.xlplus | ra3.4xlarge | ra3.16xlarge", args: { name: "string", }, @@ -7214,6 +7399,50 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "modify-integration", + description: "Modifies a zero-ETL integration with Amazon Redshift", + options: [ + { + name: "--integration-arn", + description: "The unique identifier of the integration to modify", + args: { + name: "string", + }, + }, + { + name: "--description", + description: "A new description for the integration", + args: { + name: "string", + }, + }, + { + name: "--integration-name", + description: "A new name for the integration", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. 
If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "modify-redshift-idc-application", description: @@ -7733,7 +7962,7 @@ const completionSpec: Fig.Spec = { { name: "resize-cluster", description: - "Changes the size of the cluster. You can change the cluster's type, or change the number or type of nodes. The default behavior is to use the elastic resize method. With an elastic resize, your cluster is available for read and write operations more quickly than with the classic resize method. Elastic resize operations have the following restrictions: You can only resize clusters of the following types: dc2.large dc2.8xlarge ra3.xlplus ra3.4xlarge ra3.16xlarge The type of nodes that you add must match the node type for the cluster", + "Changes the size of the cluster. You can change the cluster's type, or change the number or type of nodes. The default behavior is to use the elastic resize method. With an elastic resize, your cluster is available for read and write operations more quickly than with the classic resize method. Elastic resize operations have the following restrictions: You can only resize clusters of the following types: dc2.large dc2.8xlarge ra3.large ra3.xlplus ra3.4xlarge ra3.16xlarge The type of nodes that you add must match the node type for the cluster", options: [ { name: "--cluster-identifier", diff --git a/src/aws/repostspace.ts b/src/aws/repostspace.ts index 6d73aee0c88c..070c341497c2 100644 --- a/src/aws/repostspace.ts +++ b/src/aws/repostspace.ts @@ -3,6 +3,97 @@ const completionSpec: Fig.Spec = { description: "AWS re:Post Private is a private version of AWS re:Post for enterprises with Enterprise Support or Enterprise On-Ramp Support plans. It provides access to knowledge and experts to accelerate cloud adoption and increase developer productivity. With your organization-specific private re:Post, you can build an organization-specific developer community that drives efficiencies at scale and provides access to valuable knowledge resources. 
Additionally, re:Post Private centralizes trusted AWS technical content and offers private discussion forums to improve how your teams collaborate internally and with AWS to remove technical obstacles, accelerate innovation, and scale more efficiently in the cloud", subcommands: [ + { + name: "batch-add-role", + description: "Add role to multiple users or groups in a private re:Post", + options: [ + { + name: "--accessor-ids", + description: + "The user or group accessor identifiers to add the role to", + args: { + name: "list", + }, + }, + { + name: "--role", + description: "The role to add to the users or groups", + args: { + name: "string", + }, + }, + { + name: "--space-id", + description: "The unique ID of the private re:Post", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "batch-remove-role", + description: + "Remove role from multiple users or groups in a private re:Post", + options: [ + { + name: "--accessor-ids", + description: + "The user or group accessor identifiers to remove the role from", + args: { + name: "list", + }, + }, + { + name: "--role", + description: "The role to remove from the users or groups", + args: { + name: "string", + }, + }, + { + name: "--space-id", + description: "The unique ID of the private re:Post", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "create-space", description: "Creates an AWS re:Post Private private re:Post", diff --git a/src/aws/resiliencehub.ts b/src/aws/resiliencehub.ts index 666e04f5c9a9..ae2a4190f0f8 100644 --- a/src/aws/resiliencehub.ts +++ b/src/aws/resiliencehub.ts @@ -136,6 +136,14 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--aws-application-arn", + description: + "Amazon Resource Name (ARN) of Resource Groups group that is integrated with an AppRegistry application. For more information about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide", + args: { + name: "string", + }, + }, { name: "--client-token", description: @@ -1425,7 +1433,7 @@ const completionSpec: Fig.Spec = { { name: "list-app-assessment-compliance-drifts", description: - "List of compliance drifts that were detected while running an assessment", + "Indicates the list of compliance drifts that were detected while running an assessment", options: [ { name: "--assessment-arn", @@ -2030,6 +2038,14 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--aws-application-arn", + description: + "Amazon Resource Name (ARN) of Resource Groups group that is integrated with an AppRegistry application. For more information about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide", + args: { + name: "string", + }, + }, { name: "--from-last-assessment-time", description: diff --git a/src/aws/robomaker.ts b/src/aws/robomaker.ts index ba470bf8faa0..91f2ecb0c5f3 100644 --- a/src/aws/robomaker.ts +++ b/src/aws/robomaker.ts @@ -5,7 +5,8 @@ const completionSpec: Fig.Spec = { subcommands: [ { name: "batch-delete-worlds", - description: "Deletes one or more worlds in a batch operation", + description: + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Deletes one or more worlds in a batch operation", options: [ { name: "--worlds", @@ -36,7 +37,8 @@ const completionSpec: Fig.Spec = { }, { name: "batch-describe-simulation-job", - description: "Describes one or more simulation jobs", + description: + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Describes one or more simulation jobs", options: [ { name: "--jobs", @@ -68,7 +70,7 @@ const completionSpec: Fig.Spec = { { name: "cancel-deployment-job", description: - "Cancels the specified deployment job. This API will no longer be supported as of May 2, 2022. 
Use it to remove resources that were created for Deployment Service", + "This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page. Cancels the specified deployment job", options: [ { name: "--job", @@ -98,7 +100,8 @@ const completionSpec: Fig.Spec = { }, { name: "cancel-simulation-job", - description: "Cancels the specified simulation job", + description: + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Cancels the specified simulation job", options: [ { name: "--job", @@ -129,7 +132,7 @@ const completionSpec: Fig.Spec = { { name: "cancel-simulation-job-batch", description: - "Cancels a simulation job batch. When you cancel a simulation job batch, you are also cancelling all of the active simulation jobs created as part of the batch", + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Cancels a simulation job batch. When you cancel a simulation job batch, you are also cancelling all of the active simulation jobs created as part of the batch", options: [ { name: "--batch", @@ -159,7 +162,8 @@ const completionSpec: Fig.Spec = { }, { name: "cancel-world-export-job", - description: "Cancels the specified export job", + description: + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Cancels the specified export job", options: [ { name: "--job", @@ -190,7 +194,8 @@ const completionSpec: Fig.Spec = { }, { name: "cancel-world-generation-job", - description: "Cancels the specified world generator job", + description: + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Cancels the specified world generator job", options: [ { name: "--job", @@ -222,7 +227,7 @@ const completionSpec: Fig.Spec = { { name: "create-deployment-job", description: - "Deploys a specific version of a robot application to robots in a fleet. This API is no longer supported and will throw an error if used. 
The robot application must have a numbered applicationVersion for consistency reasons. To create a new version, use CreateRobotApplicationVersion or see Creating a Robot Application Version. After 90 days, deployment jobs expire and will be deleted. They will no longer be accessible", + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. This API is no longer supported and will throw an error if used. For more information, see the January 31, 2022 update in the Support policy page. Deploys a specific version of a robot application to robots in a fleet. The robot application must have a numbered applicationVersion for consistency reasons. To create a new version, use CreateRobotApplicationVersion or see Creating a Robot Application Version. After 90 days, deployment jobs expire and will be deleted. They will no longer be accessible", options: [ { name: "--deployment-config", @@ -283,7 +288,7 @@ const completionSpec: Fig.Spec = { { name: "create-fleet", description: - "Creates a fleet, a logical group of robots running the same robot application. This API is no longer supported and will throw an error if used", + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. This API is no longer supported and will throw an error if used. For more information, see the January 31, 2022 update in the Support policy page. Creates a fleet, a logical group of robots running the same robot application", options: [ { name: "--name", @@ -322,7 +327,7 @@ const completionSpec: Fig.Spec = { { name: "create-robot", description: - "Creates a robot. This API is no longer supported and will throw an error if used", + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. This API is no longer supported and will throw an error if used. For more information, see the January 31, 2022 update in the Support policy page. Creates a robot", options: [ { name: "--name", @@ -374,7 +379,8 @@ const completionSpec: Fig.Spec = { }, { name: "create-robot-application", - description: "Creates a robot application", + description: + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. 
For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Creates a robot application", options: [ { name: "--name", @@ -392,8 +398,7 @@ const completionSpec: Fig.Spec = { }, { name: "--robot-software-suite", - description: - "The robot software suite (ROS distribuition) used by the robot application", + description: "The robot software suite used by the robot application", args: { name: "structure", }, @@ -435,7 +440,8 @@ const completionSpec: Fig.Spec = { }, { name: "create-robot-application-version", - description: "Creates a version of a robot application", + description: + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Creates a version of a robot application", options: [ { name: "--application", @@ -489,7 +495,8 @@ const completionSpec: Fig.Spec = { }, { name: "create-simulation-application", - description: "Creates a simulation application", + description: + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Creates a simulation application", options: [ { name: "--name", @@ -516,7 +523,7 @@ const completionSpec: Fig.Spec = { { name: "--robot-software-suite", description: - "The robot software suite (ROS distribution) used by the simulation application", + "The robot software suite used by the simulation application", args: { name: "structure", }, @@ -566,7 +573,7 @@ const completionSpec: Fig.Spec = { { name: "create-simulation-application-version", description: - "Creates a simulation application with a specific revision id", + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Creates a simulation application with a specific revision id", options: [ { name: "--application", @@ -622,7 +629,7 @@ const completionSpec: Fig.Spec = { { name: "create-simulation-job", description: - "Creates a simulation job. After 90 days, simulation jobs expire and will be deleted. They will no longer be accessible", + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. 
For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Creates a simulation job. After 90 days, simulation jobs expire and will be deleted. They will no longer be accessible", options: [ { name: "--client-request-token", @@ -738,7 +745,8 @@ const completionSpec: Fig.Spec = { }, { name: "create-world-export-job", - description: "Creates a world export job", + description: + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Creates a world export job", options: [ { name: "--client-request-token", @@ -800,7 +808,8 @@ const completionSpec: Fig.Spec = { }, { name: "create-world-generation-job", - description: "Creates worlds using the specified template", + description: + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Creates worlds using the specified template", options: [ { name: "--client-request-token", @@ -862,7 +871,8 @@ const completionSpec: Fig.Spec = { }, { name: "create-world-template", - description: "Creates a world template", + description: + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Creates a world template", options: [ { name: "--client-request-token", @@ -923,7 +933,7 @@ const completionSpec: Fig.Spec = { { name: "delete-fleet", description: - "Deletes a fleet. This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service", + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page. Deletes a fleet", options: [ { name: "--fleet", @@ -954,7 +964,7 @@ const completionSpec: Fig.Spec = { { name: "delete-robot", description: - "Deletes a robot. This API will no longer be supported as of May 2, 2022. 
Use it to remove resources that were created for Deployment Service", + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page. Deletes a robot", options: [ { name: "--robot", @@ -984,7 +994,8 @@ const completionSpec: Fig.Spec = { }, { name: "delete-robot-application", - description: "Deletes a robot application", + description: + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Deletes a robot application", options: [ { name: "--application", @@ -1022,7 +1033,8 @@ const completionSpec: Fig.Spec = { }, { name: "delete-simulation-application", - description: "Deletes a simulation application", + description: + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Deletes a simulation application", options: [ { name: "--application", @@ -1060,7 +1072,8 @@ const completionSpec: Fig.Spec = { }, { name: "delete-world-template", - description: "Deletes a world template", + description: + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Deletes a world template", options: [ { name: "--template", @@ -1092,7 +1105,7 @@ const completionSpec: Fig.Spec = { { name: "deregister-robot", description: - "Deregisters a robot. This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service", + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. This API is no longer supported. 
For more information, see the May 2, 2022 update in the Support policy page. Deregisters a robot", options: [ { name: "--fleet", @@ -1130,7 +1143,7 @@ const completionSpec: Fig.Spec = { { name: "describe-deployment-job", description: - "Describes a deployment job. This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service", + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page. Describes a deployment job", options: [ { name: "--job", @@ -1161,7 +1174,7 @@ const completionSpec: Fig.Spec = { { name: "describe-fleet", description: - "Describes a fleet. This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service", + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page. Describes a fleet", options: [ { name: "--fleet", @@ -1192,7 +1205,7 @@ const completionSpec: Fig.Spec = { { name: "describe-robot", description: - "Describes a robot. This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service", + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page. Describes a robot", options: [ { name: "--robot", @@ -1223,7 +1236,8 @@ const completionSpec: Fig.Spec = { }, { name: "describe-robot-application", - description: "Describes a robot application", + description: + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. 
Describes a robot application", options: [ { name: "--application", @@ -1261,7 +1275,8 @@ const completionSpec: Fig.Spec = { }, { name: "describe-simulation-application", - description: "Describes a simulation application", + description: + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Describes a simulation application", options: [ { name: "--application", @@ -1299,7 +1314,8 @@ const completionSpec: Fig.Spec = { }, { name: "describe-simulation-job", - description: "Describes a simulation job", + description: + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Describes a simulation job", options: [ { name: "--job", @@ -1330,7 +1346,8 @@ const completionSpec: Fig.Spec = { }, { name: "describe-simulation-job-batch", - description: "Describes a simulation job batch", + description: + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Describes a simulation job batch", options: [ { name: "--batch", @@ -1360,7 +1377,8 @@ const completionSpec: Fig.Spec = { }, { name: "describe-world", - description: "Describes a world", + description: + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Describes a world", options: [ { name: "--world", @@ -1391,7 +1409,8 @@ const completionSpec: Fig.Spec = { }, { name: "describe-world-export-job", - description: "Describes a world export job", + description: + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. 
Describes a world export job", options: [ { name: "--job", @@ -1422,7 +1441,8 @@ const completionSpec: Fig.Spec = { }, { name: "describe-world-generation-job", - description: "Describes a world generation job", + description: + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Describes a world generation job", options: [ { name: "--job", @@ -1453,7 +1473,8 @@ const completionSpec: Fig.Spec = { }, { name: "describe-world-template", - description: "Describes a world template", + description: + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Describes a world template", options: [ { name: "--template", @@ -1484,7 +1505,8 @@ const completionSpec: Fig.Spec = { }, { name: "get-world-template-body", - description: "Gets the world template body", + description: + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Gets the world template body", options: [ { name: "--template", @@ -1523,7 +1545,7 @@ const completionSpec: Fig.Spec = { { name: "list-deployment-jobs", description: - "Returns a list of deployment jobs for a fleet. You can optionally provide filters to retrieve specific deployment jobs. This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service", + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page. Returns a list of deployment jobs for a fleet. You can optionally provide filters to retrieve specific deployment jobs", options: [ { name: "--filters", @@ -1595,7 +1617,7 @@ const completionSpec: Fig.Spec = { { name: "list-fleets", description: - "Returns a list of fleets. You can optionally provide filters to retrieve specific fleets. This API will no longer be supported as of May 2, 2022. 
Use it to remove resources that were created for Deployment Service", + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page. Returns a list of fleets. You can optionally provide filters to retrieve specific fleets", options: [ { name: "--next-token", @@ -1667,7 +1689,7 @@ const completionSpec: Fig.Spec = { { name: "list-robot-applications", description: - "Returns a list of robot application. You can optionally provide filters to retrieve specific robot applications", + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Returns a list of robot application. You can optionally provide filters to retrieve specific robot applications", options: [ { name: "--version-qualifier", @@ -1746,7 +1768,7 @@ const completionSpec: Fig.Spec = { { name: "list-robots", description: - "Returns a list of robots. You can optionally provide filters to retrieve specific robots. This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service", + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page. Returns a list of robots. You can optionally provide filters to retrieve specific robots", options: [ { name: "--next-token", @@ -1818,7 +1840,7 @@ const completionSpec: Fig.Spec = { { name: "list-simulation-applications", description: - "Returns a list of simulation applications. You can optionally provide filters to retrieve specific simulation applications", + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Returns a list of simulation applications. 
You can optionally provide filters to retrieve specific simulation applications", options: [ { name: "--version-qualifier", @@ -1897,7 +1919,7 @@ const completionSpec: Fig.Spec = { { name: "list-simulation-job-batches", description: - "Returns a list simulation job batches. You can optionally provide filters to retrieve specific simulation batch jobs", + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Returns a list simulation job batches. You can optionally provide filters to retrieve specific simulation batch jobs", options: [ { name: "--next-token", @@ -1968,7 +1990,7 @@ const completionSpec: Fig.Spec = { { name: "list-simulation-jobs", description: - "Returns a list of simulation jobs. You can optionally provide filters to retrieve specific simulation jobs", + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Returns a list of simulation jobs. You can optionally provide filters to retrieve specific simulation jobs", options: [ { name: "--next-token", @@ -2039,7 +2061,8 @@ const completionSpec: Fig.Spec = { }, { name: "list-tags-for-resource", - description: "Lists all tags on a AWS RoboMaker resource", + description: + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Lists all tags on a AWS RoboMaker resource", options: [ { name: "--resource-arn", @@ -2070,7 +2093,8 @@ const completionSpec: Fig.Spec = { }, { name: "list-world-export-jobs", - description: "Lists world export jobs", + description: + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Lists world export jobs", options: [ { name: "--next-token", @@ -2141,7 +2165,8 @@ const completionSpec: Fig.Spec = { }, { name: "list-world-generation-jobs", - description: "Lists world generator jobs", + description: + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. 
After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Lists world generator jobs", options: [ { name: "--next-token", @@ -2212,7 +2237,8 @@ const completionSpec: Fig.Spec = { }, { name: "list-world-templates", - description: "Lists world templates", + description: + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Lists world templates", options: [ { name: "--next-token", @@ -2275,7 +2301,8 @@ const completionSpec: Fig.Spec = { }, { name: "list-worlds", - description: "Lists worlds", + description: + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Lists worlds", options: [ { name: "--next-token", @@ -2346,7 +2373,7 @@ const completionSpec: Fig.Spec = { { name: "register-robot", description: - "Registers a robot with a fleet. This API is no longer supported and will throw an error if used", + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Registers a robot with a fleet. This API is no longer supported and will throw an error if used. For more information, see the January 31, 2022 update in the Support policy page", options: [ { name: "--fleet", @@ -2383,7 +2410,8 @@ const completionSpec: Fig.Spec = { }, { name: "restart-simulation-job", - description: "Restarts a running simulation job", + description: + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Restarts a running simulation job", options: [ { name: "--job", @@ -2414,7 +2442,7 @@ const completionSpec: Fig.Spec = { { name: "start-simulation-job-batch", description: - "Starts a new simulation job batch. 
The batch is defined using one or more SimulationJobRequest objects", + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Starts a new simulation job batch. The batch is defined using one or more SimulationJobRequest objects", options: [ { name: "--client-request-token", @@ -2469,7 +2497,7 @@ const completionSpec: Fig.Spec = { { name: "sync-deployment-job", description: - "Syncrhonizes robots in a fleet to the latest deployment. This is helpful if robots were added after a deployment. This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service", + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page. Syncrhonizes robots in a fleet to the latest deployment. This is helpful if robots were added after a deployment", options: [ { name: "--client-request-token", @@ -2508,7 +2536,7 @@ const completionSpec: Fig.Spec = { { name: "tag-resource", description: - "Adds or edits tags for a AWS RoboMaker resource. Each tag consists of a tag key and a tag value. Tag keys and tag values are both required, but tag values can be empty strings. For information about the rules that apply to tag keys and tag values, see User-Defined Tag Restrictions in the AWS Billing and Cost Management User Guide", + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Adds or edits tags for a AWS RoboMaker resource. Each tag consists of a tag key and a tag value. Tag keys and tag values are both required, but tag values can be empty strings. For information about the rules that apply to tag keys and tag values, see User-Defined Tag Restrictions in the AWS Billing and Cost Management User Guide", options: [ { name: "--resource-arn", @@ -2548,7 +2576,7 @@ const completionSpec: Fig.Spec = { { name: "untag-resource", description: - "Removes the specified tags from the specified AWS RoboMaker resource. To remove a tag, specify the tag key. To change the tag value of an existing tag key, use TagResource", + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. 
After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Removes the specified tags from the specified AWS RoboMaker resource. To remove a tag, specify the tag key. To change the tag value of an existing tag key, use TagResource", options: [ { name: "--resource-arn", @@ -2587,7 +2615,8 @@ const completionSpec: Fig.Spec = { }, { name: "update-robot-application", - description: "Updates a robot application", + description: + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Updates a robot application", options: [ { name: "--application", @@ -2605,8 +2634,7 @@ const completionSpec: Fig.Spec = { }, { name: "--robot-software-suite", - description: - "The robot software suite (ROS distribution) used by the robot application", + description: "The robot software suite used by the robot application", args: { name: "structure", }, @@ -2647,7 +2675,8 @@ const completionSpec: Fig.Spec = { }, { name: "update-simulation-application", - description: "Updates a simulation application", + description: + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Updates a simulation application", options: [ { name: "--application", @@ -2674,8 +2703,7 @@ const completionSpec: Fig.Spec = { }, { name: "--robot-software-suite", - description: - "Information about the robot software suite (ROS distribution)", + description: "Information about the robot software suite", args: { name: "structure", }, @@ -2723,7 +2751,8 @@ const completionSpec: Fig.Spec = { }, { name: "update-world-template", - description: "Updates a world template", + description: + "End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Updates a world template", options: [ { name: "--template", diff --git a/src/aws/route53resolver.ts b/src/aws/route53resolver.ts index 9b0071ecafa7..899ebd5b9061 100644 --- a/src/aws/route53resolver.ts +++ b/src/aws/route53resolver.ts @@ -617,7 +617,7 @@ const completionSpec: Fig.Spec = { { name: "--destination-arn", description: - "The ARN of the resource that you want Resolver to send query logs. 
You can send query logs to an S3 bucket, a CloudWatch Logs log group, or a Kinesis Data Firehose delivery stream. Examples of valid values include the following: S3 bucket: arn:aws:s3:::examplebucket You can optionally append a file prefix to the end of the ARN. arn:aws:s3:::examplebucket/development/ CloudWatch Logs log group: arn:aws:logs:us-west-1:123456789012:log-group:/mystack-testgroup-12ABC1AB12A1:* Kinesis Data Firehose delivery stream: arn:aws:kinesis:us-east-2:0123456789:stream/my_stream_name", + "The ARN of the resource that you want Resolver to send query logs. You can send query logs to an S3 bucket, a CloudWatch Logs log group, or a Kinesis Data Firehose delivery stream. Examples of valid values include the following: S3 bucket: arn:aws:s3:::amzn-s3-demo-bucket You can optionally append a file prefix to the end of the ARN. arn:aws:s3:::amzn-s3-demo-bucket/development/ CloudWatch Logs log group: arn:aws:logs:us-west-1:123456789012:log-group:/mystack-testgroup-12ABC1AB12A1:* Kinesis Data Firehose delivery stream: arn:aws:kinesis:us-east-2:0123456789:stream/my_stream_name", args: { name: "string", }, @@ -3202,7 +3202,7 @@ const completionSpec: Fig.Spec = { { name: "--qtype", description: - "The DNS query type you want the rule to evaluate. Allowed values are; A: Returns an IPv4 address. AAAA: Returns an Ipv6 address. CAA: Restricts CAs that can create SSL/TLS certifications for the domain. CNAME: Returns another domain name. DS: Record that identifies the DNSSEC signing key of a delegated zone. MX: Specifies mail servers. NAPTR: Regular-expression-based rewriting of domain names. NS: Authoritative name servers. PTR: Maps an IP address to a domain name. SOA: Start of authority record for the zone. SPF: Lists the servers authorized to send emails from a domain. SRV: Application specific values that identify servers. TXT: Verifies email senders and application-specific values. A query type you define by using the DNS type ID, for example 28 for AAAA. The values must be defined as TYPENUMBER, where the NUMBER can be 1-65334, for example, TYPE28. For more information, see List of DNS record types", + "The DNS query type you want the rule to evaluate. Allowed values are; A: Returns an IPv4 address. AAAA: Returns an Ipv6 address. CAA: Restricts CAs that can create SSL/TLS certifications for the domain. CNAME: Returns another domain name. DS: Record that identifies the DNSSEC signing key of a delegated zone. MX: Specifies mail servers. NAPTR: Regular-expression-based rewriting of domain names. NS: Authoritative name servers. PTR: Maps an IP address to a domain name. SOA: Start of authority record for the zone. SPF: Lists the servers authorized to send emails from a domain. SRV: Application specific values that identify servers. TXT: Verifies email senders and application-specific values. A query type you define by using the DNS type ID, for example 28 for AAAA. The values must be defined as TYPENUMBER, where the NUMBER can be 1-65334, for example, TYPE28. For more information, see List of DNS record types. 
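As a hedged sketch of how the destination ARN formats listed above are used, the following creates a Resolver query logging configuration that sends logs to an S3 bucket with the AWS SDK for JavaScript v3; the configuration name and bucket ARN are placeholder assumptions, and a CloudWatch Logs or Kinesis Data Firehose ARN would be passed the same way.

import {
  Route53ResolverClient,
  CreateResolverQueryLogConfigCommand,
} from "@aws-sdk/client-route53resolver";
import { randomUUID } from "node:crypto";

const client = new Route53ResolverClient({ region: "us-west-1" });

// Send Resolver query logs to an S3 bucket with an optional prefix.
async function createQueryLogConfig() {
  const response = await client.send(
    new CreateResolverQueryLogConfigCommand({
      Name: "example-query-log-config", // placeholder
      DestinationArn: "arn:aws:s3:::amzn-s3-demo-bucket/development/", // placeholder
      CreatorRequestId: randomUUID(), // idempotency token
    })
  );
  console.log(response.ResolverQueryLogConfig?.Id);
}

createQueryLogConfig().catch(console.error);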
If you set up a firewall BLOCK rule with action NXDOMAIN on query type equals AAAA, this action will not be applied to synthetic IPv6 addresses generated when DNS64 is enabled", args: { name: "string", }, diff --git a/src/aws/s3api.ts b/src/aws/s3api.ts index 24e654059b28..581860b27938 100644 --- a/src/aws/s3api.ts +++ b/src/aws/s3api.ts @@ -402,7 +402,7 @@ const completionSpec: Fig.Spec = { { name: "--server-side-encryption", description: - "The server-side encryption algorithm used when storing this object in Amazon S3. Unrecognized or unsupported values won\u2019t write a destination object and will receive a 400 Bad Request response. Amazon S3 automatically encrypts all new objects that are copied to an S3 bucket. When copying an object, if you don't specify encryption information in your copy request, the encryption setting of the target object is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a different default encryption configuration, Amazon S3 uses the corresponding encryption key to encrypt the target object copy. With server-side encryption, Amazon S3 encrypts your data as it writes your data to disks in its data centers and decrypts the data when you access it. For more information about server-side encryption, see Using Server-Side Encryption in the Amazon S3 User Guide. General purpose buckets For general purpose buckets, there are the following supported options for server-side encryption: server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), and server-side encryption with customer-provided encryption keys (SSE-C). Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the target object copy. When you perform a CopyObject operation, if you want to use a different type of encryption setting for the target object, you can specify appropriate encryption-related headers to encrypt the target object with an Amazon S3 managed key, a KMS key, or a customer-provided key. If the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence. Directory buckets For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads. To encrypt new object copies to a directory bucket with SSE-KMS, we recommend you specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). Amazon Web Services managed key (aws/s3) isn't supported. 
Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. After you specify a customer managed key for SSE-KMS, you can't override the customer managed key for the bucket's SSE-KMS configuration. Then, when you perform a CopyObject operation and want to specify server-side encryption settings for new object copies with SSE-KMS in the encryption-related request headers, you must ensure the encryption key is the same customer managed key that you specified for the directory bucket's default encryption configuration", + "The server-side encryption algorithm used when storing this object in Amazon S3. Unrecognized or unsupported values won\u2019t write a destination object and will receive a 400 Bad Request response. Amazon S3 automatically encrypts all new objects that are copied to an S3 bucket. When copying an object, if you don't specify encryption information in your copy request, the encryption setting of the target object is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a different default encryption configuration, Amazon S3 uses the corresponding encryption key to encrypt the target object copy. With server-side encryption, Amazon S3 encrypts your data as it writes your data to disks in its data centers and decrypts the data when you access it. For more information about server-side encryption, see Using Server-Side Encryption in the Amazon S3 User Guide. General purpose buckets For general purpose buckets, there are the following supported options for server-side encryption: server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), and server-side encryption with customer-provided encryption keys (SSE-C). Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the target object copy. When you perform a CopyObject operation, if you want to use a different type of encryption setting for the target object, you can specify appropriate encryption-related headers to encrypt the target object with an Amazon S3 managed key, a KMS key, or a customer-provided key. If the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence. Directory buckets For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads. To encrypt new object copies to a directory bucket with SSE-KMS, we recommend you specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). 
The Amazon Web Services managed key (aws/s3) isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. After you specify a customer managed key for SSE-KMS, you can't override the customer managed key for the bucket's SSE-KMS configuration. Then, when you perform a CopyObject operation and want to specify server-side encryption settings for new object copies with SSE-KMS in the encryption-related request headers, you must ensure the encryption key is the same customer managed key that you specified for the directory bucket's default encryption configuration", args: { name: "string", }, @@ -450,7 +450,7 @@ const completionSpec: Fig.Spec = { { name: "--ssekms-key-id", description: - "Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. All GET and PUT requests for an object protected by KMS will fail if they're not made via SSL or using SigV4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide. Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported", + "Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. All GET and PUT requests for an object protected by KMS will fail if they're not made via SSL or using SigV4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide. Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. If you want to specify the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported", args: { name: "string", }, @@ -843,7 +843,7 @@ const completionSpec: Fig.Spec = { { name: "--ssekms-key-id", description: - "Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID. General purpose buckets - If you specify x-amz-server-side-encryption with aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS key to use. 
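A minimal sketch of the copy-object encryption headers described above, using the AWS SDK for JavaScript v3; the bucket names and the customer managed key ARN are placeholder assumptions, and for a directory bucket the key must be the one configured as the bucket's default SSE-KMS key.

import { S3Client, CopyObjectCommand } from "@aws-sdk/client-s3";

const s3 = new S3Client({ region: "us-east-1" });

// Copy an object and encrypt the target copy with SSE-KMS.
// Only a key ID or key ARN is accepted here; key aliases are not.
async function copyWithSseKms() {
  await s3.send(
    new CopyObjectCommand({
      Bucket: "amzn-s3-demo-destination-bucket", // placeholder
      Key: "reports/copy.csv", // placeholder
      CopySource: "amzn-s3-demo-source-bucket/reports/original.csv", // placeholder
      ServerSideEncryption: "aws:kms",
      SSEKMSKeyId:
        "arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab", // placeholder
    })
  );
}

copyWithSseKms().catch(console.error);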
If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data. Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported", + "Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID. General purpose buckets - If you specify x-amz-server-side-encryption with aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS key to use. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data. Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. If you want to specify the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported", args: { name: "string", }, @@ -944,7 +944,7 @@ const completionSpec: Fig.Spec = { { name: "create-session", description: - "Creates a session that establishes temporary security credentials to support fast authentication and authorization for the Zonal endpoint API operations on directory buckets. For more information about Zonal endpoint API operations that include the Availability Zone in the request endpoint, see S3 Express One Zone APIs in the Amazon S3 User Guide. To make Zonal endpoint API requests on a directory bucket, use the CreateSession API operation. Specifically, you grant s3express:CreateSession permission to a bucket in a bucket policy or an IAM identity-based policy. Then, you use IAM credentials to make the CreateSession API request on the bucket, which returns temporary security credentials that include the access key ID, secret access key, session token, and expiration. These credentials have associated permissions to access the Zonal endpoint API operations. After the session is created, you don\u2019t need to use other policies to grant permissions to each Zonal endpoint API individually. 
Instead, in your Zonal endpoint API requests, you sign your requests by applying the temporary security credentials of the session to the request headers and following the SigV4 protocol for authentication. You also apply the session token to the x-amz-s3session-token request header for authorization. Temporary security credentials are scoped to the bucket and expire after 5 minutes. After the expiration time, any calls that you make with those credentials will fail. You must use IAM credentials again to make a CreateSession API request that generates a new set of temporary credentials for use. Temporary credentials cannot be extended or refreshed beyond the original specified interval. If you use Amazon Web Services SDKs, SDKs handle the session token refreshes automatically to avoid service interruptions when a session expires. We recommend that you use the Amazon Web Services SDKs to initiate and manage requests to the CreateSession API. For more information, see Performance guidelines and design patterns in the Amazon S3 User Guide. You must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. CopyObject API operation - Unlike other Zonal endpoint API operations, the CopyObject API operation doesn't use the temporary security credentials returned from the CreateSession API operation for authentication and authorization. For information about authentication and authorization of the CopyObject API operation on directory buckets, see CopyObject. HeadBucket API operation - Unlike other Zonal endpoint API operations, the HeadBucket API operation doesn't use the temporary security credentials returned from the CreateSession API operation for authentication and authorization. For information about authentication and authorization of the HeadBucket API operation on directory buckets, see HeadBucket. Permissions To obtain temporary security credentials, you must create a bucket policy or an IAM identity-based policy that grants s3express:CreateSession permission to the bucket. In a policy, you can have the s3express:SessionMode condition key to control who can create a ReadWrite or ReadOnly session. For more information about ReadWrite or ReadOnly sessions, see x-amz-create-session-mode . For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide. To grant cross-account access to Zonal endpoint API operations, the bucket policy should also grant both accounts the s3express:CreateSession permission. If you want to encrypt objects with SSE-KMS, you must also have the kms:GenerateDataKey and the kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the target KMS key. Encryption For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. 
For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads. For Zonal endpoint (object-level) API operations except CopyObject and UploadPartCopy, you authenticate and authorize requests through CreateSession for low latency. To encrypt new objects in a directory bucket with SSE-KMS, you must specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). Then, when a session is created for Zonal endpoint API operations, new objects are automatically encrypted and decrypted with SSE-KMS and S3 Bucket Keys during the session. Only 1 customer managed key is supported per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported. After you specify SSE-KMS as your bucket's default encryption configuration with a customer managed key, you can't change the customer managed key for the bucket's SSE-KMS configuration. In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API, you can't override the values of the encryption settings (x-amz-server-side-encryption, x-amz-server-side-encryption-aws-kms-key-id, x-amz-server-side-encryption-context, and x-amz-server-side-encryption-bucket-key-enabled) from the CreateSession request. You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and Amazon S3 will use the encryption settings values from the CreateSession request to protect new objects in the directory bucket. When you use the CLI or the Amazon Web Services SDKs, for CreateSession, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the CreateSession request. It's not supported to override the encryption settings values in the CreateSession request. Also, in the Zonal endpoint API calls (except CopyObject and UploadPartCopy), it's not supported to override the values of the encryption settings from the CreateSession request. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com", + "Creates a session that establishes temporary security credentials to support fast authentication and authorization for the Zonal endpoint API operations on directory buckets. For more information about Zonal endpoint API operations that include the Availability Zone in the request endpoint, see S3 Express One Zone APIs in the Amazon S3 User Guide. To make Zonal endpoint API requests on a directory bucket, use the CreateSession API operation. Specifically, you grant s3express:CreateSession permission to a bucket in a bucket policy or an IAM identity-based policy. Then, you use IAM credentials to make the CreateSession API request on the bucket, which returns temporary security credentials that include the access key ID, secret access key, session token, and expiration. These credentials have associated permissions to access the Zonal endpoint API operations. After the session is created, you don\u2019t need to use other policies to grant permissions to each Zonal endpoint API individually. 
Instead, in your Zonal endpoint API requests, you sign your requests by applying the temporary security credentials of the session to the request headers and following the SigV4 protocol for authentication. You also apply the session token to the x-amz-s3session-token request header for authorization. Temporary security credentials are scoped to the bucket and expire after 5 minutes. After the expiration time, any calls that you make with those credentials will fail. You must use IAM credentials again to make a CreateSession API request that generates a new set of temporary credentials for use. Temporary credentials cannot be extended or refreshed beyond the original specified interval. If you use Amazon Web Services SDKs, SDKs handle the session token refreshes automatically to avoid service interruptions when a session expires. We recommend that you use the Amazon Web Services SDKs to initiate and manage requests to the CreateSession API. For more information, see Performance guidelines and design patterns in the Amazon S3 User Guide. You must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. CopyObject API operation - Unlike other Zonal endpoint API operations, the CopyObject API operation doesn't use the temporary security credentials returned from the CreateSession API operation for authentication and authorization. For information about authentication and authorization of the CopyObject API operation on directory buckets, see CopyObject. HeadBucket API operation - Unlike other Zonal endpoint API operations, the HeadBucket API operation doesn't use the temporary security credentials returned from the CreateSession API operation for authentication and authorization. For information about authentication and authorization of the HeadBucket API operation on directory buckets, see HeadBucket. Permissions To obtain temporary security credentials, you must create a bucket policy or an IAM identity-based policy that grants s3express:CreateSession permission to the bucket. In a policy, you can have the s3express:SessionMode condition key to control who can create a ReadWrite or ReadOnly session. For more information about ReadWrite or ReadOnly sessions, see x-amz-create-session-mode . For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide. To grant cross-account access to Zonal endpoint API operations, the bucket policy should also grant both accounts the s3express:CreateSession permission. If you want to encrypt objects with SSE-KMS, you must also have the kms:GenerateDataKey and the kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the target KMS key. Encryption For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. 
For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads. For Zonal endpoint (object-level) API operations except CopyObject and UploadPartCopy, you authenticate and authorize requests through CreateSession for low latency. To encrypt new objects in a directory bucket with SSE-KMS, you must specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). Then, when a session is created for Zonal endpoint API operations, new objects are automatically encrypted and decrypted with SSE-KMS and S3 Bucket Keys during the session. Only 1 customer managed key is supported per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported. After you specify SSE-KMS as your bucket's default encryption configuration with a customer managed key, you can't change the customer managed key for the bucket's SSE-KMS configuration. In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API, you can't override the values of the encryption settings (x-amz-server-side-encryption, x-amz-server-side-encryption-aws-kms-key-id, x-amz-server-side-encryption-context, and x-amz-server-side-encryption-bucket-key-enabled) from the CreateSession request. You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and Amazon S3 will use the encryption settings values from the CreateSession request to protect new objects in the directory bucket. When you use the CLI or the Amazon Web Services SDKs, for CreateSession, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the CreateSession request. It's not supported to override the encryption settings values in the CreateSession request. Also, in the Zonal endpoint API calls (except CopyObject and UploadPartCopy), it's not supported to override the values of the encryption settings from the CreateSession request. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com", options: [ { name: "--session-mode", @@ -972,7 +972,7 @@ const completionSpec: Fig.Spec = { { name: "--ssekms-key-id", description: - "If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Also, if the KMS key doesn't exist in the same account that't issuing the command, you must use the full Key ARN not the Key ID. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported", + "If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. 
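A hedged sketch of the CreateSession flow described above with the AWS SDK for JavaScript v3; the directory bucket name is a placeholder, and in practice the SDKs and the CLI create and refresh these session credentials for you automatically.

import { S3Client, CreateSessionCommand } from "@aws-sdk/client-s3";

const s3 = new S3Client({ region: "us-east-1" });

// Obtain temporary, bucket-scoped credentials for Zonal endpoint
// API operations on a directory bucket (credentials expire after
// about 5 minutes and cannot be extended).
async function createDirectoryBucketSession() {
  const { Credentials } = await s3.send(
    new CreateSessionCommand({
      Bucket: "amzn-s3-demo-bucket--usw2-az1--x-s3", // placeholder directory bucket
      SessionMode: "ReadWrite", // or "ReadOnly"
    })
  );
  console.log(Credentials?.AccessKeyId, Credentials?.Expiration);
}

createDirectoryBucketSession().catch(console.error);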
The key alias format of the KMS key isn't supported. Also, if the KMS key doesn't exist in the same account that't issuing the command, you must use the full Key ARN not the Key ID. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported", args: { name: "string", }, @@ -1569,7 +1569,7 @@ const completionSpec: Fig.Spec = { { name: "delete-object", description: - "Removes an object from a bucket. The behavior depends on the bucket's versioning state: If bucket versioning is not enabled, the operation permanently deletes the object. If bucket versioning is enabled, the operation inserts a delete marker, which becomes the current version of the object. To permanently delete an object in a versioned bucket, you must include the object\u2019s versionId in the request. For more information about versioning-enabled buckets, see Deleting object versions from a versioning-enabled bucket. If bucket versioning is suspended, the operation removes the object that has a null versionId, if there is one, and inserts a delete marker that becomes the current version of the object. If there isn't an object with a null versionId, and all versions of the object have a versionId, Amazon S3 does not remove the object and only inserts a delete marker. To permanently delete an object that has a versionId, you must include the object\u2019s versionId in the request. For more information about versioning-suspended buckets, see Deleting objects from versioning-suspended buckets. Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can only specify null to the versionId query parameter in the request. Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. To remove a specific version, you must use the versionId query parameter. Using this query parameter permanently deletes the version. If the object deleted is a delete marker, Amazon S3 sets the response header x-amz-delete-marker to true. If the object you want to delete is in a bucket where the bucket versioning configuration is MFA Delete enabled, you must include the x-amz-mfa request header in the DELETE versionId request. Requests that include x-amz-mfa must use HTTPS. For more information about MFA Delete, see Using MFA Delete in the Amazon S3 User Guide. To see sample requests that use versioning, see Sample Request. Directory buckets - MFA delete is not supported by directory buckets. You can delete objects by explicitly calling DELETE Object or calling (PutBucketLifecycle) to enable Amazon S3 to remove them for you. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration actions. Directory buckets - S3 Lifecycle is not supported by directory buckets. Permissions General purpose bucket permissions - The following permissions are required in your policies when your DeleteObjects request includes specific headers. 
s3:DeleteObject - To delete an object from a bucket, you must always have the s3:DeleteObject permission. s3:DeleteObjectVersion - To delete a specific version of an object from a versioning-enabled bucket, you must have the s3:DeleteObjectVersion permission. Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession . HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following action is related to DeleteObject: PutObject", + "Removes an object from a bucket. The behavior depends on the bucket's versioning state. For more information, see Best practices to consider before deleting an object. To remove a specific version, you must use the versionId query parameter. Using this query parameter permanently deletes the version. If the object deleted is a delete marker, Amazon S3 sets the response header x-amz-delete-marker to true. If the object you want to delete is in a bucket where the bucket versioning configuration is MFA delete enabled, you must include the x-amz-mfa request header in the DELETE versionId request. Requests that include x-amz-mfa must use HTTPS. For more information about MFA delete and to see example requests, see Using MFA delete and Sample request in the Amazon S3 User Guide. S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can only specify null to the versionId query parameter in the request. For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. MFA delete is not supported by directory buckets. Permissions General purpose bucket permissions - The following permissions are required in your policies when your DeleteObjects request includes specific headers. s3:DeleteObject - To delete an object from a bucket, you must always have the s3:DeleteObject permission. You can also use PutBucketLifecycle to delete objects in Amazon S3. s3:DeleteObjectVersion - To delete a specific version of an object from a versioning-enabled bucket, you must have the s3:DeleteObjectVersion permission. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration permissions. 
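A minimal sketch of the delete behaviors summarized above, using the AWS SDK for JavaScript v3; the bucket, key, version ID, and MFA serial/code are placeholder assumptions.

import { S3Client, DeleteObjectCommand } from "@aws-sdk/client-s3";

const s3 = new S3Client({ region: "us-east-1" });

async function deleteObjectExamples() {
  // In a versioning-enabled bucket, omitting VersionId inserts a
  // delete marker instead of permanently removing data.
  await s3.send(
    new DeleteObjectCommand({
      Bucket: "amzn-s3-demo-bucket", // placeholder
      Key: "reports/original.csv", // placeholder
    })
  );

  // Permanently delete a specific version; if the bucket has MFA
  // delete enabled, the MFA header is also required (over HTTPS).
  await s3.send(
    new DeleteObjectCommand({
      Bucket: "amzn-s3-demo-bucket", // placeholder
      Key: "reports/original.csv", // placeholder
      VersionId: "3HL4kqtJlcpXroDTDmJ", // placeholder version ID
      MFA: "arn:aws:iam::111122223333:mfa/user 123456", // placeholder serial + code
    })
  );
}

deleteObjectExamples().catch(console.error);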
Directory buckets permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following action is related to DeleteObject: PutObject", options: [ { name: "--bucket", @@ -3869,6 +3869,22 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--prefix", + description: + "Limits the response to bucket names that begin with the specified bucket name prefix", + args: { + name: "string", + }, + }, + { + name: "--bucket-region", + description: + "Limits the response to buckets that are located in the specified Amazon Web Services Region. The Amazon Web Services Region must be expressed according to the Amazon Web Services Region code, such as us-west-2 for the US West (Oregon) Region. For a list of the valid values for all of the Amazon Web Services Regions, see Regions and Endpoints. Requests made to a Regional endpoint that is different from the bucket-region parameter are not supported. For example, if you want to limit the response to your buckets in Region us-west-2, the request must be made to an endpoint in Region us-west-2", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: @@ -4900,7 +4916,7 @@ const completionSpec: Fig.Spec = { { name: "put-bucket-encryption", description: - "This operation configures default encryption and Amazon S3 Bucket Keys for an existing bucket. Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name . Virtual-hosted-style requests aren't supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. By default, all buckets have a default encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). General purpose buckets You can optionally configure default encryption for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you specify default encryption by using SSE-KMS, you can also configure Amazon S3 Bucket Keys. For information about the bucket default encryption feature, see Amazon S3 Bucket Default Encryption in the Amazon S3 User Guide. If you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 doesn't validate the KMS key ID provided in PutBucketEncryption requests. Directory buckets - You can optionally configure default encryption for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported. 
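The new --prefix and --bucket-region options added above correspond to ListBuckets request parameters. A hedged sketch with the AWS SDK for JavaScript v3, assuming the SDK exposes them as Prefix and BucketRegion (parameter names inferred from the CLI options); the prefix and Region values are placeholders, and the request must be sent to an endpoint in the same Region as bucket-region.

import { S3Client, ListBucketsCommand } from "@aws-sdk/client-s3";

// Endpoint Region must match the BucketRegion filter.
const s3 = new S3Client({ region: "us-west-2" });

async function listTeamBucketsInOregon() {
  const response = await s3.send(
    new ListBucketsCommand({
      Prefix: "team-data", // assumed SDK name for --prefix
      BucketRegion: "us-west-2", // assumed SDK name for --bucket-region
    })
  );
  for (const bucket of response.Buckets ?? []) {
    console.log(bucket.Name);
  }
}

listTeamBucketsInOregon().catch(console.error);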
S3 Bucket Keys are always enabled for GET and PUT operations in a directory bucket and can\u2019t be disabled. S3 Bucket Keys aren't supported, when you copy SSE-KMS encrypted objects from general purpose buckets to directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through CopyObject, UploadPartCopy, the Copy operation in Batch Operations, or the import jobs. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object. When you specify an KMS customer managed key for encryption in your directory bucket, only use the key ID or key ARN. The key alias format of the KMS key isn't supported. For directory buckets, if you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, Amazon S3 validates the KMS key ID provided in PutBucketEncryption requests. If you're specifying a customer managed KMS key, we recommend using a fully qualified KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the requester\u2019s account. This behavior can result in data that's encrypted with a KMS key that belongs to the requester, and not the bucket owner. Also, this action requires Amazon Web Services Signature Version 4. For more information, see Authenticating Requests (Amazon Web Services Signature Version 4). Permissions General purpose bucket permissions - The s3:PutEncryptionConfiguration permission is required in a policy. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide. Directory bucket permissions - To grant access to this API operation, you must have the s3express:PutEncryptionConfiguration permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide. To set a directory bucket default encryption with SSE-KMS, you must also have the kms:GenerateDataKey and the kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the target KMS key. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com. The following operations are related to PutBucketEncryption: GetBucketEncryption DeleteBucketEncryption", + "This operation configures default encryption and Amazon S3 Bucket Keys for an existing bucket. Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name . Virtual-hosted-style requests aren't supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. By default, all buckets have a default encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). 
General purpose buckets You can optionally configure default encryption for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you specify default encryption by using SSE-KMS, you can also configure Amazon S3 Bucket Keys. For information about the bucket default encryption feature, see Amazon S3 Bucket Default Encryption in the Amazon S3 User Guide. If you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 doesn't validate the KMS key ID provided in PutBucketEncryption requests. Directory buckets - You can optionally configure default encryption for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported. S3 Bucket Keys are always enabled for GET and PUT operations in a directory bucket and can\u2019t be disabled. S3 Bucket Keys aren't supported, when you copy SSE-KMS encrypted objects from general purpose buckets to directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through CopyObject, UploadPartCopy, the Copy operation in Batch Operations, or the import jobs. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object. When you specify an KMS customer managed key for encryption in your directory bucket, only use the key ID or key ARN. The key alias format of the KMS key isn't supported. For directory buckets, if you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, Amazon S3 validates the KMS key ID provided in PutBucketEncryption requests. If you're specifying a customer managed KMS key, we recommend using a fully qualified KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the requester\u2019s account. This behavior can result in data that's encrypted with a KMS key that belongs to the requester, and not the bucket owner. Also, this action requires Amazon Web Services Signature Version 4. For more information, see Authenticating Requests (Amazon Web Services Signature Version 4). Permissions General purpose bucket permissions - The s3:PutEncryptionConfiguration permission is required in a policy. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide. Directory bucket permissions - To grant access to this API operation, you must have the s3express:PutEncryptionConfiguration permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. 
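A hedged sketch of setting the default bucket encryption described above to SSE-KMS with the AWS SDK for JavaScript v3; the bucket name and customer managed key ARN are placeholders, and for directory buckets only the key ID or key ARN form is accepted.

import { S3Client, PutBucketEncryptionCommand } from "@aws-sdk/client-s3";

const s3 = new S3Client({ region: "us-east-1" });

// Default all new objects in the bucket to SSE-KMS with a customer
// managed key, and enable S3 Bucket Keys to reduce calls to KMS.
async function setDefaultSseKms() {
  await s3.send(
    new PutBucketEncryptionCommand({
      Bucket: "amzn-s3-demo-bucket", // placeholder
      ServerSideEncryptionConfiguration: {
        Rules: [
          {
            ApplyServerSideEncryptionByDefault: {
              SSEAlgorithm: "aws:kms",
              KMSMasterKeyID:
                "arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab", // placeholder
            },
            BucketKeyEnabled: true,
          },
        ],
      },
    })
  );
}

setDefaultSseKms().catch(console.error);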
This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide. To set a directory bucket default encryption with SSE-KMS, you must also have the kms:GenerateDataKey and the kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the target KMS key. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com. The following operations are related to PutBucketEncryption: GetBucketEncryption DeleteBucketEncryption", options: [ { name: "--bucket", @@ -5967,7 +5983,7 @@ const completionSpec: Fig.Spec = { { name: "--content-md5", description: - "The base64-encoded 128-bit MD5 digest of the message (without the headers) according to RFC 1864. This header can be used as a message integrity check to verify that the data is the same data that was originally sent. Although it is optional, we recommend using the Content-MD5 mechanism as an end-to-end integrity check. For more information about REST request authentication, see REST Authentication. The Content-MD5 header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview in the Amazon S3 User Guide. This functionality is not supported for directory buckets", + "The base64-encoded 128-bit MD5 digest of the message (without the headers) according to RFC 1864. This header can be used as a message integrity check to verify that the data is the same data that was originally sent. Although it is optional, we recommend using the Content-MD5 mechanism as an end-to-end integrity check. For more information about REST request authentication, see REST Authentication. The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information, see Uploading objects to an Object Lock enabled bucket in the Amazon S3 User Guide. This functionality is not supported for directory buckets", args: { name: "string", }, @@ -5983,7 +5999,7 @@ const completionSpec: Fig.Spec = { { name: "--checksum-algorithm", description: - "Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For the x-amz-checksum-algorithm header, replace algorithm with the supported algorithm from the following list: CRC32 CRC32C SHA1 SHA256 For more information, see Checking object integrity in the Amazon S3 User Guide. If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm . For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance", + "Indicates the algorithm used to create the checksum for the object when you use the SDK. 
This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For the x-amz-checksum-algorithm header, replace algorithm with the supported algorithm from the following list: CRC32 CRC32C SHA1 SHA256 For more information, see Checking object integrity in the Amazon S3 User Guide. If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm . The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information, see Uploading objects to an Object Lock enabled bucket in the Amazon S3 User Guide. For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance", args: { name: "string", }, @@ -6133,7 +6149,7 @@ const completionSpec: Fig.Spec = { { name: "--ssekms-key-id", description: - "Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID. General purpose buckets - If you specify x-amz-server-side-encryption with aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS key to use. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data. Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported", + "Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID. General purpose buckets - If you specify x-amz-server-side-encryption with aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS key to use. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data. Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. 
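The encryption and checksum headers described here map to parameters on object-upload calls such as PutObject in the SDKs. A minimal sketch with the AWS SDK for JavaScript v3 follows; the bucket, key, body, and KMS key ARN are placeholders:

```ts
import { S3Client, PutObjectCommand } from "@aws-sdk/client-s3";

const s3 = new S3Client({});

// ChecksumAlgorithm makes the SDK compute and send the corresponding
// x-amz-checksum-* value for the upload; SSEKMSKeyId selects the KMS key.
await s3.send(
  new PutObjectCommand({
    Bucket: "amzn-s3-demo-bucket",
    Key: "reports/2024-10.csv",
    Body: "city,total\nSeattle,120\n",
    ServerSideEncryption: "aws:kms",
    SSEKMSKeyId:
      "arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab",
    ChecksumAlgorithm: "SHA256",
  })
);
```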
If you want to specify the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported", args: { name: "string", }, @@ -6783,7 +6799,7 @@ const completionSpec: Fig.Spec = { { name: "restore-object", description: - "This operation is not supported by directory buckets. The SELECT job type for the RestoreObject operation is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more Restores an archived copy of an object back into Amazon S3 This functionality is not supported for Amazon S3 on Outposts. This action performs the following types of requests: restore an archive - Restore an archived object For more information about the S3 structure in the request body, see the following: PutObject Managing Access with ACLs in the Amazon S3 User Guide Protecting Data Using Server-Side Encryption in the Amazon S3 User Guide Permissions To use this operation, you must have permissions to perform the s3:RestoreObject action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide. Restoring objects Objects that you archive to the S3 Glacier Flexible Retrieval Flexible Retrieval or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, are not accessible in real time. For objects in the S3 Glacier Flexible Retrieval Flexible Retrieval or S3 Glacier Deep Archive storage classes, you must first initiate a restore request, and then wait until a temporary copy of the object is available. If you want a permanent copy of the object, create a copy of it in the Amazon S3 Standard storage class in your S3 bucket. To access an archived object, you must restore the object for the duration (number of days) that you specify. For objects in the Archive Access or Deep Archive Access tiers of S3 Intelligent-Tiering, you must first initiate a restore request, and then wait until the object is moved into the Frequent Access tier. To restore a specific object version, you can provide a version ID. If you don't provide a version ID, Amazon S3 restores the current version. When restoring an archived object, you can specify one of the following data access tier options in the Tier element of the request body: Expedited - Expedited retrievals allow you to quickly access your data stored in the S3 Glacier Flexible Retrieval Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier when occasional urgent requests for restoring archives are required. For all but the largest archived objects (250 MB+), data accessed using Expedited retrievals is typically made available within 1\u20135 minutes. Provisioned capacity ensures that retrieval capacity for Expedited retrievals is available when you need it. 
Expedited retrievals and provisioned capacity are not available for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard - Standard retrievals allow you to access any of your archived objects within several hours. This is the default option for retrieval requests that do not specify the retrieval option. Standard retrievals typically finish within 3\u20135 hours for objects stored in the S3 Glacier Flexible Retrieval Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. They typically finish within 12 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects stored in S3 Intelligent-Tiering. Bulk - Bulk retrievals free for objects stored in the S3 Glacier Flexible Retrieval and S3 Intelligent-Tiering storage classes, enabling you to retrieve large amounts, even petabytes, of data at no cost. Bulk retrievals typically finish within 5\u201312 hours for objects stored in the S3 Glacier Flexible Retrieval Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. Bulk retrievals are also the lowest-cost retrieval option when restoring objects from S3 Glacier Deep Archive. They typically finish within 48 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. For more information about archive retrieval options and provisioned capacity for Expedited data access, see Restoring Archived Objects in the Amazon S3 User Guide. You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the Amazon S3 User Guide. To get the status of object restoration, you can send a HEAD request. Operations return the x-amz-restore header, which provides information about the restoration status, in the response. You can use Amazon S3 event notifications to notify you when a restore is initiated or completed. For more information, see Configuring Amazon S3 Event Notifications in the Amazon S3 User Guide. After restoring an archived object, you can update the restoration period by reissuing the request with a new period. Amazon S3 updates the restoration period relative to the current time and charges only for the request-there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively processing your current restore request for the object. If your bucket has a lifecycle configuration with a rule that includes an expiration action, the object expiration overrides the life span that you specify in a restore request. For example, if you restore an object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management in Amazon S3 User Guide. Responses A successful action returns either the 200 OK or 202 Accepted status code. If the object is not previously restored, then Amazon S3 returns 202 Accepted in the response. If the object is previously restored, Amazon S3 returns 200 OK in the response. Special errors: Code: RestoreAlreadyInProgress Cause: Object restore is already in progress. HTTP Status Code: 409 Conflict SOAP Fault Code Prefix: Client Code: GlacierExpeditedRetrievalNotAvailable Cause: expedited retrievals are currently not available. 
Try again later. (Returned if there is insufficient capacity to process the Expedited request. This error applies only to Expedited retrievals and not to S3 Standard or Bulk retrievals.) HTTP Status Code: 503 SOAP Fault Code Prefix: N/A The following operations are related to RestoreObject: PutBucketLifecycleConfiguration GetBucketNotificationConfiguration", + "This operation is not supported by directory buckets. Restores an archived copy of an object back into Amazon S3 This functionality is not supported for Amazon S3 on Outposts. This action performs the following types of requests: restore an archive - Restore an archived object For more information about the S3 structure in the request body, see the following: PutObject Managing Access with ACLs in the Amazon S3 User Guide Protecting Data Using Server-Side Encryption in the Amazon S3 User Guide Permissions To use this operation, you must have permissions to perform the s3:RestoreObject action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide. Restoring objects Objects that you archive to the S3 Glacier Flexible Retrieval Flexible Retrieval or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, are not accessible in real time. For objects in the S3 Glacier Flexible Retrieval Flexible Retrieval or S3 Glacier Deep Archive storage classes, you must first initiate a restore request, and then wait until a temporary copy of the object is available. If you want a permanent copy of the object, create a copy of it in the Amazon S3 Standard storage class in your S3 bucket. To access an archived object, you must restore the object for the duration (number of days) that you specify. For objects in the Archive Access or Deep Archive Access tiers of S3 Intelligent-Tiering, you must first initiate a restore request, and then wait until the object is moved into the Frequent Access tier. To restore a specific object version, you can provide a version ID. If you don't provide a version ID, Amazon S3 restores the current version. When restoring an archived object, you can specify one of the following data access tier options in the Tier element of the request body: Expedited - Expedited retrievals allow you to quickly access your data stored in the S3 Glacier Flexible Retrieval Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier when occasional urgent requests for restoring archives are required. For all but the largest archived objects (250 MB+), data accessed using Expedited retrievals is typically made available within 1\u20135 minutes. Provisioned capacity ensures that retrieval capacity for Expedited retrievals is available when you need it. Expedited retrievals and provisioned capacity are not available for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard - Standard retrievals allow you to access any of your archived objects within several hours. This is the default option for retrieval requests that do not specify the retrieval option. Standard retrievals typically finish within 3\u20135 hours for objects stored in the S3 Glacier Flexible Retrieval Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. 
They typically finish within 12 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects stored in S3 Intelligent-Tiering. Bulk - Bulk retrievals free for objects stored in the S3 Glacier Flexible Retrieval and S3 Intelligent-Tiering storage classes, enabling you to retrieve large amounts, even petabytes, of data at no cost. Bulk retrievals typically finish within 5\u201312 hours for objects stored in the S3 Glacier Flexible Retrieval Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. Bulk retrievals are also the lowest-cost retrieval option when restoring objects from S3 Glacier Deep Archive. They typically finish within 48 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. For more information about archive retrieval options and provisioned capacity for Expedited data access, see Restoring Archived Objects in the Amazon S3 User Guide. You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the Amazon S3 User Guide. To get the status of object restoration, you can send a HEAD request. Operations return the x-amz-restore header, which provides information about the restoration status, in the response. You can use Amazon S3 event notifications to notify you when a restore is initiated or completed. For more information, see Configuring Amazon S3 Event Notifications in the Amazon S3 User Guide. After restoring an archived object, you can update the restoration period by reissuing the request with a new period. Amazon S3 updates the restoration period relative to the current time and charges only for the request-there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively processing your current restore request for the object. If your bucket has a lifecycle configuration with a rule that includes an expiration action, the object expiration overrides the life span that you specify in a restore request. For example, if you restore an object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management in Amazon S3 User Guide. Responses A successful action returns either the 200 OK or 202 Accepted status code. If the object is not previously restored, then Amazon S3 returns 202 Accepted in the response. If the object is previously restored, Amazon S3 returns 200 OK in the response. Special errors: Code: RestoreAlreadyInProgress Cause: Object restore is already in progress. HTTP Status Code: 409 Conflict SOAP Fault Code Prefix: Client Code: GlacierExpeditedRetrievalNotAvailable Cause: expedited retrievals are currently not available. Try again later. (Returned if there is insufficient capacity to process the Expedited request. This error applies only to Expedited retrievals and not to S3 Standard or Bulk retrievals.) HTTP Status Code: 503 SOAP Fault Code Prefix: N/A The following operations are related to RestoreObject: PutBucketLifecycleConfiguration GetBucketNotificationConfiguration", options: [ { name: "--bucket", @@ -6862,7 +6878,7 @@ const completionSpec: Fig.Spec = { { name: "select-object-content", description: - "This operation is not supported by directory buckets. 
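A minimal sketch of the RestoreObject request described above, using the AWS SDK for JavaScript v3; the bucket, key, and retention period are placeholders, and the Standard retrieval tier is chosen:

```ts
import { S3Client, RestoreObjectCommand } from "@aws-sdk/client-s3";

const s3 = new S3Client({});

// Restores an archived object for 10 days using the Standard retrieval tier.
await s3.send(
  new RestoreObjectCommand({
    Bucket: "amzn-s3-demo-bucket",
    Key: "archive/2019-backup.tar",
    RestoreRequest: {
      Days: 10,
      GlacierJobParameters: { Tier: "Standard" },
    },
  })
);
```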
The SelectObjectContent operation is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the operation as usual. Learn more This action filters the contents of an Amazon S3 object based on a simple structured query language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response. This functionality is not supported for Amazon S3 on Outposts. For more information about Amazon S3 Select, see Selecting Content from Objects and SELECT Command in the Amazon S3 User Guide. Permissions You must have the s3:GetObject permission for this operation.\u00a0Amazon S3 Select does not support anonymous access. For more information about permissions, see Specifying Permissions in a Policy in the Amazon S3 User Guide. Object Data Formats You can use Amazon S3 Select to query objects that have the following format properties: CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format. UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports. GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that Amazon S3 Select supports for CSV and JSON files. Amazon S3 Select supports columnar compression for Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object compression for Parquet objects. Server-side encryption - Amazon S3 Select supports querying objects that are protected with server-side encryption. For objects that are encrypted with customer-provided encryption keys (SSE-C), you must use HTTPS, and you must use the headers that are documented in the GetObject. For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide. For objects that are encrypted with Amazon S3 managed keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), server-side encryption is handled transparently, so you don't need to specify anything. For more information about server-side encryption, including SSE-S3 and SSE-KMS, see Protecting Data Using Server-Side Encryption in the Amazon S3 User Guide. Working with the Response Body Given the response size is unknown, Amazon S3 Select streams the response as a series of messages and includes a Transfer-Encoding header with chunked as its value in the response. For more information, see Appendix: SelectObjectContent Response. GetObject Support The SelectObjectContent action does not support the following GetObject functionality. For more information, see GetObject. Range: Although you can specify a scan range for an Amazon S3 Select request (see SelectObjectContentRequest - ScanRange in the request parameters), you cannot specify the range of bytes of an object to return. The GLACIER, DEEP_ARCHIVE, and REDUCED_REDUNDANCY storage classes, or the ARCHIVE_ACCESS and DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING storage class: You cannot query objects in the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes, nor objects in the ARCHIVE_ACCESS or DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING storage class. For more information about storage classes, see Using Amazon S3 storage classes in the Amazon S3 User Guide. 
Special Errors For a list of special errors for this operation, see List of SELECT Object Content Error Codes The following operations are related to SelectObjectContent: GetObject GetBucketLifecycleConfiguration PutBucketLifecycleConfiguration", + "This operation is not supported by directory buckets. This action filters the contents of an Amazon S3 object based on a simple structured query language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response. This functionality is not supported for Amazon S3 on Outposts. For more information about Amazon S3 Select, see Selecting Content from Objects and SELECT Command in the Amazon S3 User Guide. Permissions You must have the s3:GetObject permission for this operation.\u00a0Amazon S3 Select does not support anonymous access. For more information about permissions, see Specifying Permissions in a Policy in the Amazon S3 User Guide. Object Data Formats You can use Amazon S3 Select to query objects that have the following format properties: CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format. UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports. GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that Amazon S3 Select supports for CSV and JSON files. Amazon S3 Select supports columnar compression for Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object compression for Parquet objects. Server-side encryption - Amazon S3 Select supports querying objects that are protected with server-side encryption. For objects that are encrypted with customer-provided encryption keys (SSE-C), you must use HTTPS, and you must use the headers that are documented in the GetObject. For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide. For objects that are encrypted with Amazon S3 managed keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), server-side encryption is handled transparently, so you don't need to specify anything. For more information about server-side encryption, including SSE-S3 and SSE-KMS, see Protecting Data Using Server-Side Encryption in the Amazon S3 User Guide. Working with the Response Body Given the response size is unknown, Amazon S3 Select streams the response as a series of messages and includes a Transfer-Encoding header with chunked as its value in the response. For more information, see Appendix: SelectObjectContent Response. GetObject Support The SelectObjectContent action does not support the following GetObject functionality. For more information, see GetObject. Range: Although you can specify a scan range for an Amazon S3 Select request (see SelectObjectContentRequest - ScanRange in the request parameters), you cannot specify the range of bytes of an object to return. 
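A minimal sketch of the SelectObjectContent flow described above, using the AWS SDK for JavaScript v3; the bucket, key, and SQL expression are placeholders, and the chunked response is decoded message by message:

```ts
import { S3Client, SelectObjectContentCommand } from "@aws-sdk/client-s3";

const s3 = new S3Client({});

// Runs a SQL filter over a CSV object; S3 streams matching records back as
// a series of event messages.
const { Payload } = await s3.send(
  new SelectObjectContentCommand({
    Bucket: "amzn-s3-demo-bucket",
    Key: "reports/2024-10.csv",
    ExpressionType: "SQL",
    Expression:
      "SELECT s.city, s.total FROM S3Object s WHERE CAST(s.total AS INT) > 100",
    InputSerialization: { CSV: { FileHeaderInfo: "USE" }, CompressionType: "NONE" },
    OutputSerialization: { JSON: {} },
  })
);

const decoder = new TextDecoder();
if (Payload) {
  for await (const event of Payload) {
    if (event.Records?.Payload) {
      console.log(decoder.decode(event.Records.Payload));
    }
  }
}
```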
The GLACIER, DEEP_ARCHIVE, and REDUCED_REDUNDANCY storage classes, or the ARCHIVE_ACCESS and DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING storage class: You cannot query objects in the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes, nor objects in the ARCHIVE_ACCESS or DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING storage class. For more information about storage classes, see Using Amazon S3 storage classes in the Amazon S3 User Guide. Special Errors For a list of special errors for this operation, see List of SELECT Object Content Error Codes The following operations are related to SelectObjectContent: GetObject GetBucketLifecycleConfiguration PutBucketLifecycleConfiguration", options: [ { name: "--bucket", diff --git a/src/aws/securitylake.ts b/src/aws/securitylake.ts index 4de11e64580a..588ab8849cb4 100644 --- a/src/aws/securitylake.ts +++ b/src/aws/securitylake.ts @@ -1,12 +1,12 @@ const completionSpec: Fig.Spec = { name: "securitylake", description: - "Amazon Security Lake is a fully managed security data lake service. You can use Security Lake to automatically centralize security data from cloud, on-premises, and custom sources into a data lake that's stored in your Amazon Web Services account. Amazon Web Services Organizations is an account management service that lets you consolidate multiple Amazon Web Services accounts into an organization that you create and centrally manage. With Organizations, you can create member accounts and invite existing accounts to join your organization. Security Lake helps you analyze security data for a more complete understanding of your security posture across the entire organization. It can also help you improve the protection of your workloads, applications, and data. The data lake is backed by Amazon Simple Storage Service (Amazon S3) buckets, and you retain ownership over your data. Amazon Security Lake integrates with CloudTrail, a service that provides a record of actions taken by a user, role, or an Amazon Web Services service. In Security Lake, CloudTrail captures API calls for Security Lake as events. The calls captured include calls from the Security Lake console and code calls to the Security Lake API operations. If you create a trail, you can enable continuous delivery of CloudTrail events to an Amazon S3 bucket, including events for Security Lake. If you don't configure a trail, you can still view the most recent events in the CloudTrail console in Event history. Using the information collected by CloudTrail you can determine the request that was made to Security Lake, the IP address from which the request was made, who made the request, when it was made, and additional details. To learn more about Security Lake information in CloudTrail, see the Amazon Security Lake User Guide. Security Lake automates the collection of security-related log and event data from integrated Amazon Web Services and third-party services. It also helps you manage the lifecycle of data with customizable retention and replication settings. Security Lake converts ingested data into Apache Parquet format and a standard open-source schema called the Open Cybersecurity Schema Framework (OCSF). Other Amazon Web Services and third-party services can subscribe to the data that's stored in Security Lake for incident response and security data analytics", + "Amazon Security Lake is a fully managed security data lake service. 
You can use Security Lake to automatically centralize security data from cloud, on-premises, and custom sources into a data lake that's stored in your Amazon Web Services account. Amazon Web Services Organizations is an account management service that lets you consolidate multiple Amazon Web Services accounts into an organization that you create and centrally manage. With Organizations, you can create member accounts and invite existing accounts to join your organization. Security Lake helps you analyze security data for a more complete understanding of your security posture across the entire organization. It can also help you improve the protection of your workloads, applications, and data. The data lake is backed by Amazon Simple Storage Service (Amazon S3) buckets, and you retain ownership over your data. Amazon Security Lake integrates with CloudTrail, a service that provides a record of actions taken by a user, role, or an Amazon Web Services service. In Security Lake, CloudTrail captures API calls for Security Lake as events. The calls captured include calls from the Security Lake console and code calls to the Security Lake API operations. If you create a trail, you can enable continuous delivery of CloudTrail events to an Amazon S3 bucket, including events for Security Lake. If you don't configure a trail, you can still view the most recent events in the CloudTrail console in Event history. Using the information collected by CloudTrail you can determine the request that was made to Security Lake, the IP address from which the request was made, who made the request, when it was made, and additional details. To learn more about Security Lake information in CloudTrail, see the Amazon Security Lake User Guide. Security Lake automates the collection of security-related log and event data from integrated Amazon Web Services services and third-party services. It also helps you manage the lifecycle of data with customizable retention and replication settings. Security Lake converts ingested data into Apache Parquet format and a standard open-source schema called the Open Cybersecurity Schema Framework (OCSF). Other Amazon Web Services services and third-party services can subscribe to the data that's stored in Security Lake for incident response and security data analytics", subcommands: [ { name: "create-aws-log-source", description: - "Adds a natively supported Amazon Web Service as an Amazon Security Lake source. Enables source types for member accounts in required Amazon Web Services Regions, based on the parameters you specify. You can choose any source type in any Region for either accounts that are part of a trusted organization or standalone accounts. Once you add an Amazon Web Service as a source, Security Lake starts collecting logs and events from it. You can use this API only to enable natively supported Amazon Web Services as a source. Use CreateCustomLogSource to enable data collection from a custom source", + "Adds a natively supported Amazon Web Services service as an Amazon Security Lake source. Enables source types for member accounts in required Amazon Web Services Regions, based on the parameters you specify. You can choose any source type in any Region for either accounts that are part of a trusted organization or standalone accounts. Once you add an Amazon Web Services service as a source, Security Lake starts collecting logs and events from it. You can use this API only to enable natively supported Amazon Web Services services as a source. 
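A minimal sketch of enabling natively supported sources through CreateAwsLogSource, assuming @aws-sdk/client-securitylake; the account ID, Regions, and source names are placeholders:

```ts
import {
  SecurityLakeClient,
  CreateAwsLogSourceCommand,
} from "@aws-sdk/client-securitylake";

const securitylake = new SecurityLakeClient({});

// Enables VPC Flow Logs and CloudTrail management events as sources for the
// listed member account in the listed Regions (all values are placeholders).
await securitylake.send(
  new CreateAwsLogSourceCommand({
    sources: [
      {
        sourceName: "VPC_FLOW",
        regions: ["us-east-1", "eu-west-1"],
        accounts: ["111122223333"],
      },
      {
        sourceName: "CLOUD_TRAIL_MGMT",
        regions: ["us-east-1", "eu-west-1"],
        accounts: ["111122223333"],
      },
    ],
  })
);
```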
Use CreateCustomLogSource to enable data collection from a custom source", options: [ { name: "--sources", @@ -42,7 +42,8 @@ const completionSpec: Fig.Spec = { options: [ { name: "--configuration", - description: "The configuration for the third-party custom source", + description: + "The configuration used for the third-party custom source", args: { name: "structure", }, @@ -58,7 +59,7 @@ const completionSpec: Fig.Spec = { { name: "--source-name", description: - "Specify the name for a third-party custom source. This must be a Regionally unique value", + "Specify the name for a third-party custom source. This must be a Regionally unique value. The sourceName you enter here, is used in the LogProviderRole name which follows the convention AmazonSecurityLake-Provider-{name of the custom source}-{region}. You must use a CustomLogSource name that is shorter than or equal to 20 characters. This ensures that the LogProviderRole name is below the 64 character limit", args: { name: "string", }, @@ -93,7 +94,7 @@ const completionSpec: Fig.Spec = { { name: "create-data-lake", description: - "Initializes an Amazon Security Lake instance with the provided (or default) configuration. You can enable Security Lake in Amazon Web Services Regions with customized settings before enabling log collection in Regions. To specify particular Regions, configure these Regions using the configurations parameter. If you have already enabled Security Lake in a Region when you call this command, the command will update the Region if you provide new configuration parameters. If you have not already enabled Security Lake in the Region when you call this API, it will set up the data lake in the Region with the specified configurations. When you enable Security Lake, it starts ingesting security data after the CreateAwsLogSource call. This includes ingesting security data from sources, storing data, and making data accessible to subscribers. Security Lake also enables all the existing settings and resources that it stores or maintains for your Amazon Web Services account in the current Region, including security log and event data. For more information, see the Amazon Security Lake User Guide", + "Initializes an Amazon Security Lake instance with the provided (or default) configuration. You can enable Security Lake in Amazon Web Services Regions with customized settings before enabling log collection in Regions. To specify particular Regions, configure these Regions using the configurations parameter. If you have already enabled Security Lake in a Region when you call this command, the command will update the Region if you provide new configuration parameters. If you have not already enabled Security Lake in the Region when you call this API, it will set up the data lake in the Region with the specified configurations. When you enable Security Lake, it starts ingesting security data after the CreateAwsLogSource call and after you create subscribers using the CreateSubscriber API. This includes ingesting security data from sources, storing data, and making data accessible to subscribers. Security Lake also enables all the existing settings and resources that it stores or maintains for your Amazon Web Services account in the current Region, including security log and event data. 
For more information, see the Amazon Security Lake User Guide", options: [ { name: "--configurations", @@ -141,11 +142,12 @@ const completionSpec: Fig.Spec = { { name: "create-data-lake-exception-subscription", description: - "Creates the specified notification subscription in Amazon Security Lake for the organization you specify", + "Creates the specified notification subscription in Amazon Security Lake for the organization you specify. The notification subscription is created for exceptions that cannot be resolved by Security Lake automatically", options: [ { name: "--exception-time-to-live", - description: "The expiration period and time-to-live (TTL)", + description: + "The expiration period and time-to-live (TTL). It is the duration of time until which the exception message remains", args: { name: "long", }, @@ -220,7 +222,7 @@ const completionSpec: Fig.Spec = { { name: "create-subscriber", description: - "Creates a subscription permission for accounts that are already enabled in Amazon Security Lake. You can create a subscriber with access to data in the current Amazon Web Services Region", + "Creates a subscriber for accounts that are already enabled in Amazon Security Lake. You can create a subscriber with access to data in the current Amazon Web Services Region", options: [ { name: "--access-types", @@ -232,7 +234,7 @@ const completionSpec: Fig.Spec = { { name: "--sources", description: - "The supported Amazon Web Services from which logs and events are collected. Security Lake supports log and event collection for natively supported Amazon Web Services", + "The supported Amazon Web Services services from which logs and events are collected. Security Lake supports log and event collection for natively supported Amazon Web Services services", args: { name: "list", }, @@ -329,7 +331,7 @@ const completionSpec: Fig.Spec = { { name: "delete-aws-log-source", description: - "Removes a natively supported Amazon Web Service as an Amazon Security Lake source. You can remove a source for one or more Regions. When you remove the source, Security Lake stops collecting data from that source in the specified Regions and accounts, and subscribers can no longer consume new data from the source. However, subscribers can still consume data that Security Lake collected from the source before removal. You can choose any source type in any Amazon Web Services Region for either accounts that are part of a trusted organization or standalone accounts", + "Removes a natively supported Amazon Web Services service as an Amazon Security Lake source. You can remove a source for one or more Regions. When you remove the source, Security Lake stops collecting data from that source in the specified Regions and accounts, and subscribers can no longer consume new data from the source. However, subscribers can still consume data that Security Lake collected from the source before removal. 
You can choose any source type in any Amazon Web Services Region for either accounts that are part of a trusted organization or standalone accounts", options: [ { name: "--sources", @@ -520,7 +522,7 @@ const completionSpec: Fig.Spec = { { name: "delete-subscriber-notification", description: - "Deletes the specified notification subscription in Amazon Security Lake for the organization you specify", + "Deletes the specified subscription notification in Amazon Security Lake for the organization you specify", options: [ { name: "--subscriber-id", @@ -575,7 +577,7 @@ const completionSpec: Fig.Spec = { { name: "get-data-lake-exception-subscription", description: - "Retrieves the details of exception notifications for the account in Amazon Security Lake", + "Retrieves the protocol and endpoint that were provided when subscribing to Amazon SNS topics for exception notifications", options: [ { name: "--cli-input-json", @@ -731,7 +733,7 @@ const completionSpec: Fig.Spec = { options: [ { name: "--max-results", - description: "List the maximum number of failures in Security Lake", + description: "Lists the maximum number of failures in Security Lake", args: { name: "integer", }, @@ -739,7 +741,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "List if there are more results available. The value of nextToken is a unique pagination token for each page. Repeat the call using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error", + "Lists if there are more results available. The value of nextToken is a unique pagination token for each page. Repeat the call using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error", args: { name: "string", }, @@ -828,8 +830,7 @@ const completionSpec: Fig.Spec = { }, { name: "list-log-sources", - description: - "Retrieves the log sources in the current Amazon Web Services Region", + description: "Retrieves the log sources", options: [ { name: "--accounts", @@ -917,7 +918,7 @@ const completionSpec: Fig.Spec = { { name: "list-subscribers", description: - "List all subscribers for the specific Amazon Security Lake account ID. You can retrieve a list of subscriptions associated with a specific organization or Amazon Web Services account", + "Lists all subscribers for the specific Amazon Security Lake account ID. You can retrieve a list of subscriptions associated with a specific organization or Amazon Web Services account", options: [ { name: "--max-results", @@ -1125,12 +1126,12 @@ const completionSpec: Fig.Spec = { { name: "update-data-lake", description: - "Specifies where to store your security data and for how long. You can add a rollup Region to consolidate data from multiple Amazon Web Services Regions", + "You can use UpdateDataLake to specify where to store your security data, how it should be encrypted at rest and for how long. You can add a Rollup Region to consolidate data from multiple Amazon Web Services Regions, replace default encryption (SSE-S3) with Customer Manged Key, or specify transition and expiration actions through storage Lifecycle management. The UpdateDataLake API works as an \"upsert\" operation that performs an insert if the specified item or record does not exist, or an update if it already exists. 
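A minimal sketch of the UpdateDataLake upsert described above, assuming @aws-sdk/client-securitylake; the Region, encryption setting, storage class, and retention values are placeholders:

```ts
import {
  SecurityLakeClient,
  UpdateDataLakeCommand,
} from "@aws-sdk/client-securitylake";

const securitylake = new SecurityLakeClient({});

// Keeps S3-managed encryption for one Region and sets lifecycle transition
// and expiration for the data lake there (all values are placeholders).
await securitylake.send(
  new UpdateDataLakeCommand({
    configurations: [
      {
        region: "us-east-1",
        encryptionConfiguration: { kmsKeyId: "S3_MANAGED_KEY" },
        lifecycleConfiguration: {
          transitions: [{ days: 90, storageClass: "STANDARD_IA" }],
          expiration: { days: 365 },
        },
      },
    ],
  })
);
```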
Security Lake securely stores your data at rest using Amazon Web Services encryption solutions. For more details, see Data protection in Amazon Security Lake. For example, omitting the key encryptionConfiguration from a Region that is included in an update call that currently uses KMS will leave that Region's KMS key in place, but specifying encryptionConfiguration: {kmsKeyId: 'S3_MANAGED_KEY'} for that same Region will reset the key to S3-managed. For more details about lifecycle management and how to update retention settings for one or more Regions after enabling Security Lake, see the Amazon Security Lake User Guide", options: [ { name: "--configurations", description: - "Specify the Region or Regions that will contribute data to the rollup region", + "Specifies the Region or Regions that will contribute data to the rollup region", args: { name: "list", }, @@ -1170,7 +1171,7 @@ const completionSpec: Fig.Spec = { { name: "--exception-time-to-live", description: - "The time-to-live (TTL) for the exception message to remain", + "The time-to-live (TTL) for the exception message to remain. It is the duration of time until which the exception message remains", args: { name: "long", }, @@ -1218,7 +1219,7 @@ const completionSpec: Fig.Spec = { { name: "--sources", description: - "The supported Amazon Web Services from which logs and events are collected. For the list of supported Amazon Web Services, see the Amazon Security Lake User Guide", + "The supported Amazon Web Services services from which logs and events are collected. For the list of supported Amazon Web Services services, see the Amazon Security Lake User Guide", args: { name: "list", }, @@ -1241,7 +1242,8 @@ const completionSpec: Fig.Spec = { }, { name: "--subscriber-identity", - description: "The AWS identity used to access your data", + description: + "The Amazon Web Services identity used to access your data", args: { name: "structure", }, diff --git a/src/aws/sesv2.ts b/src/aws/sesv2.ts index f9af135b3966..fae3dcd644bf 100644 --- a/src/aws/sesv2.ts +++ b/src/aws/sesv2.ts @@ -2592,6 +2592,14 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--max-delivery-seconds", + description: + "The maximum amount of time, in seconds, that Amazon SES API v2 will attempt delivery of email. If specified, the value must greater than or equal to 300 seconds (5 minutes) and less than or equal to 50400 seconds (840 minutes)", + args: { + name: "long", + }, + }, { name: "--cli-input-json", description: diff --git a/src/aws/socialmessaging.ts b/src/aws/socialmessaging.ts new file mode 100644 index 000000000000..c63676b1dc84 --- /dev/null +++ b/src/aws/socialmessaging.ts @@ -0,0 +1,548 @@ +const completionSpec: Fig.Spec = { + name: "socialmessaging", + description: + "Amazon Web Services End User Messaging Social, also referred to as Social messaging, is a messaging service that enables application developers to incorporate WhatsApp into their existing workflows. The Amazon Web Services End User Messaging Social API provides information about the Amazon Web Services End User Messaging Social API resources, including supported HTTP methods, parameters, and schemas. The Amazon Web Services End User Messaging Social API provides programmatic access to options that are unique to the WhatsApp Business Platform. If you're new to the Amazon Web Services End User Messaging Social API, it's also helpful to review What is Amazon Web Services End User Messaging Social in the Amazon Web Services End User Messaging Social User Guide. 
The Amazon Web Services End User Messaging Social User Guide provides tutorials, code samples, and procedures that demonstrate how to use Amazon Web Services End User Messaging Social API features programmatically and how to integrate functionality into applications. The guide also provides key information, such as integration with other Amazon Web Services services, and the quotas that apply to use of the service. Regional availability The Amazon Web Services End User Messaging Social API is available across several Amazon Web Services Regions and it provides a dedicated endpoint for each of these Regions. For a list of all the Regions and endpoints where the API is currently available, see Amazon Web Services Service Endpoints and Amazon Web Services End User Messaging endpoints and quotas in the Amazon Web Services General Reference. To learn more about Amazon Web Services Regions, see Managing Amazon Web Services Regions in the Amazon Web Services General Reference. In each Region, Amazon Web Services maintains multiple Availability Zones. These Availability Zones are physically isolated from each other, but are united by private, low-latency, high-throughput, and highly redundant network connections. These Availability Zones enable us to provide very high levels of availability and redundancy, while also minimizing latency. To learn more about the number of Availability Zones that are available in each Region, see Amazon Web Services Global Infrastructure", + subcommands: [ + { + name: "associate-whatsapp-business-account", + description: + "This is only used through the Amazon Web Services console during sign-up to associate your WhatsApp Business Account to your Amazon Web Services account", + options: [ + { + name: "--signup-callback", + description: "Contains the callback access token", + args: { + name: "structure", + }, + }, + { + name: "--setup-finalization", + description: + "A JSON object that contains the phone numbers and WhatsApp Business Account to link to your account", + args: { + name: "structure", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "delete-whatsapp-media-message", + description: + "Delete a media object from the WhatsApp service. If the object is still in an Amazon S3 bucket you should delete it from there too", + options: [ + { + name: "--media-id", + description: + "The unique identifier of the media file to delete. Use the mediaId returned from PostWhatsAppMessageMedia", + args: { + name: "string", + }, + }, + { + name: "--origination-phone-number-id", + description: + "The unique identifier of the originating phone number associated with the media. 
Phone number identifiers are formatted as phone-number-id-01234567890123456789012345678901. Use GetLinkedWhatsAppBusinessAccount to find a phone number's id", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "disassociate-whatsapp-business-account", + description: + "Disassociate a WhatsApp Business Account (WABA) from your Amazon Web Services account", + options: [ + { + name: "--id", + description: + "The unique identifier of your WhatsApp Business Account. WABA identifiers are formatted as waba-01234567890123456789012345678901. Use ListLinkedWhatsAppBusinessAccounts to list all WABAs and their details", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "get-linked-whatsapp-business-account", + description: "Get the details of your linked WhatsApp Business Account", + options: [ + { + name: "--id", + description: + "The unique identifier, from Amazon Web Services, of the linked WhatsApp Business Account. WABA identifiers are formatted as waba-01234567890123456789012345678901. Use ListLinkedWhatsAppBusinessAccounts to list all WABAs and their details", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. 
If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "get-linked-whatsapp-business-account-phone-number", + description: + "Use your WhatsApp phone number id to get the WABA account id and phone number details", + options: [ + { + name: "--id", + description: + "The unique identifier of the phone number. Phone number identifiers are formatted as phone-number-id-01234567890123456789012345678901. Use GetLinkedWhatsAppBusinessAccount to find a phone number's id", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "get-whatsapp-message-media", + description: + "Get a media file from the WhatsApp service. On successful completion the media file is retrieved from Meta and stored in the specified Amazon S3 bucket. Use either destinationS3File or destinationS3PresignedUrl for the destination. If both are used then an InvalidParameterException is returned", + options: [ + { + name: "--media-id", + description: "The unique identifier for the media file", + args: { + name: "string", + }, + }, + { + name: "--origination-phone-number-id", + description: + "The unique identifier of the originating phone number for the WhatsApp message media. The phone number identifiers are formatted as phone-number-id-01234567890123456789012345678901. Use GetLinkedWhatsAppBusinessAccount to find a phone number's id", + args: { + name: "string", + }, + }, + { + name: "--metadata-only", + description: "Set to True to get only the metadata for the file", + }, + { + name: "--no-metadata-only", + description: "Set to True to get only the metadata for the file", + }, + { + name: "--destination-s3-presigned-url", + description: "The presign url of the media file", + args: { + name: "structure", + }, + }, + { + name: "--destination-s3-file", + description: "The bucketName and key of the S3 media file", + args: { + name: "structure", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
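A minimal sketch of retrieving a received media file with GetWhatsAppMessageMedia, assuming the service's JavaScript SDK client (written here as @aws-sdk/client-socialmessaging); the media ID, phone number ID, and S3 destination are placeholders:

```ts
import {
  SocialMessagingClient,
  GetWhatsAppMessageMediaCommand,
} from "@aws-sdk/client-socialmessaging";

const social = new SocialMessagingClient({});

// Downloads a received WhatsApp media file into the given S3 object, using
// the phone number that received the message (all identifiers are placeholders).
await social.send(
  new GetWhatsAppMessageMediaCommand({
    mediaId: "example-media-id",
    originationPhoneNumberId:
      "phone-number-id-01234567890123456789012345678901",
    destinationS3File: {
      bucketName: "amzn-s3-demo-bucket",
      key: "whatsapp/incoming/media.jpg",
    },
  })
);
```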
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "list-linked-whatsapp-business-accounts", + description: + "List all WhatsApp Business Accounts linked to your Amazon Web Services account", + options: [ + { + name: "--next-token", + description: "The next token for pagination", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: "The maximum number of results to return", + args: { + name: "integer", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "list-tags-for-resource", + description: + "List all tags associated with a resource, such as a phone number or WABA", + options: [ + { + name: "--resource-arn", + description: + "The Amazon Resource Name (ARN) of the resource to retrieve the tags from", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "post-whatsapp-message-media", + description: + "Upload a media file to the WhatsApp service. Only the specified originationPhoneNumberId has the permissions to send the media file when using SendWhatsAppMessage. You must use either sourceS3File or sourceS3PresignedUrl for the source. If both or neither are specified then an InvalidParameterException is returned", + options: [ + { + name: "--origination-phone-number-id", + description: + "The ID of the phone number to associate with the WhatsApp media file. The phone number identifiers are formatted as phone-number-id-01234567890123456789012345678901. Use GetLinkedWhatsAppBusinessAccount to find a phone number's id", + args: { + name: "string", + }, + }, + { + name: "--source-s3-presigned-url", + description: "The source presign url of the media file", + args: { + name: "structure", + }, + }, + { + name: "--source-s3-file", + description: "The source S3 url for the media file", + args: { + name: "structure", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "put-whatsapp-business-account-event-destinations", + description: + "Add an event destination to log event data from WhatsApp for a WhatsApp Business Account (WABA). 
A WABA can only have one event destination at a time. All resources associated with the WABA use the same event destination", + options: [ + { + name: "--id", + description: + "The unique identifier of your WhatsApp Business Account. WABA identifiers are formatted as waba-01234567890123456789012345678901. Use ListLinkedWhatsAppBusinessAccounts to list all WABAs and their details", + args: { + name: "string", + }, + }, + { + name: "--event-destinations", + description: + "An array of WhatsAppBusinessAccountEventDestination event destinations", + args: { + name: "list", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "send-whatsapp-message", + description: + "Send a WhatsApp message. For examples of sending a message using the Amazon Web Services CLI, see Sending messages in the Amazon Web Services End User Messaging Social User Guide", + options: [ + { + name: "--origination-phone-number-id", + description: + "The ID of the phone number used to send the WhatsApp message. If you are sending a media file only the originationPhoneNumberId used to upload the file can be used. Phone number identifiers are formatted as phone-number-id-01234567890123456789012345678901. Use GetLinkedWhatsAppBusinessAccount to find a phone number's id", + args: { + name: "string", + }, + }, + { + name: "--message", + description: + "The message to send through WhatsApp. The length is in KB. The message field passes through a WhatsApp Message object, see Messages in the WhatsApp Business Platform Cloud API Reference", + args: { + name: "blob", + }, + }, + { + name: "--meta-api-version", + description: + "The API version for the request formatted as v{VersionNumber}. For a list of supported API versions and Amazon Web Services Regions, see Amazon Web Services End User Messaging Social API Service Endpoints in the Amazon Web Services General Reference", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "tag-resource", + description: + "Adds or overwrites only the specified tags for the specified resource. When you specify an existing tag key, the value is overwritten with the new value", + options: [ + { + name: "--resource-arn", + description: "The Amazon Resource Name (ARN) of the resource to tag", + args: { + name: "string", + }, + }, + { + name: "--tags", + description: "The tags to add to the resource", + args: { + name: "list", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "untag-resource", + description: "Removes the specified tags from a resource", + options: [ + { + name: "--resource-arn", + description: + "The Amazon Resource Name (ARN) of the resource to remove tags from", + args: { + name: "string", + }, + }, + { + name: "--tag-keys", + description: "The keys of the tags to remove from the resource", + args: { + name: "list", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + ], +}; + +export default completionSpec; diff --git a/src/aws/supplychain.ts b/src/aws/supplychain.ts index 9d127c505389..49468cde3950 100644 --- a/src/aws/supplychain.ts +++ b/src/aws/supplychain.ts @@ -187,6 +187,67 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "create-instance", + description: + "Create a new instance for AWS Supply Chain. This is an asynchronous operation. Upon receiving a CreateInstance request, AWS Supply Chain immediately returns the instance resource, with instance ID, and the initializing state while simultaneously creating all required Amazon Web Services resources for an instance creation. 
You can use GetInstance to check the status of the instance", + options: [ + { + name: "--instance-name", + description: "The AWS Supply Chain instance name", + args: { + name: "string", + }, + }, + { + name: "--instance-description", + description: "The AWS Supply Chain instance description", + args: { + name: "string", + }, + }, + { + name: "--kms-key-arn", + description: + "The ARN (Amazon Resource Name) of the Key Management Service (KMS) key you provide for encryption. This is required if you do not want to use the Amazon Web Services owned KMS key. If you don't provide anything here, AWS Supply Chain uses the Amazon Web Services owned KMS key", + args: { + name: "string", + }, + }, + { + name: "--tags", + description: + "The Amazon Web Services tags of an instance to be created", + args: { + name: "map", + }, + }, + { + name: "--client-token", + description: "The client token for idempotency", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "delete-data-integration-flow", description: "Delete the DataIntegrationFlow", @@ -271,6 +332,37 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "delete-instance", + description: + "Delete the instance. This is an asynchronous operation. Upon receiving a DeleteInstance request, AWS Supply Chain immediately returns a response with the instance resource, delete state while cleaning up all Amazon Web Services resources created during the instance creation process. You can use the GetInstance action to check the instance status", + options: [ + { + name: "--instance-id", + description: "The AWS Supply Chain instance identifier", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "get-bill-of-materials-import-job", description: "Get status and details of a BillOfMaterialsImportJob", @@ -393,6 +485,36 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "get-instance", + description: "Get the AWS Supply Chain instance details", + options: [ + { + name: "--instance-id", + description: "The AWS Supply Chain instance identifier", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "list-data-integration-flows", description: "Lists all the DataIntegrationFlows in a paginated way", @@ -543,6 +665,83 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "list-instances", + description: "List all the AWS Supply Chain instances in a paginated way", + options: [ + { + name: "--next-token", + description: + "The pagination token to fetch the next page of instances", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "Specify the maximum number of instances to fetch in this paginated request", + args: { + name: "integer", + }, + }, + { + name: "--instance-name-filter", + description: "The filter to ListInstances based on their names", + args: { + name: "list", + }, + }, + { + name: "--instance-state-filter", + description: "The filter to ListInstances based on their state", + args: { + name: "list", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. 
This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "list-tags-for-resource", description: @@ -838,6 +1037,50 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "update-instance", + description: "Update the instance", + options: [ + { + name: "--instance-id", + description: "The AWS Supply Chain instance identifier", + args: { + name: "string", + }, + }, + { + name: "--instance-name", + description: "The AWS Supply Chain instance name", + args: { + name: "string", + }, + }, + { + name: "--instance-description", + description: "The AWS Supply Chain instance description", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, ], }; export default completionSpec; diff --git a/src/aws/timestream-query.ts b/src/aws/timestream-query.ts index b15731cd1c6b..740957d90778 100644 --- a/src/aws/timestream-query.ts +++ b/src/aws/timestream-query.ts @@ -245,7 +245,8 @@ const completionSpec: Fig.Spec = { }, { name: "execute-scheduled-query", - description: "You can use this API to run a scheduled query manually", + description: + "You can use this API to run a scheduled query manually. If you enabled QueryInsights, this API also returns insights and metrics related to the query that you executed as part of an Amazon SNS notification. 
QueryInsights helps with performance tuning of your query", options: [ { name: "--scheduled-query-arn", @@ -269,6 +270,14 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--query-insights", + description: + "Encapsulates settings for enabling QueryInsights. Enabling QueryInsights returns insights and metrics as a part of the Amazon SNS notification for the query that you executed. You can use QueryInsights to tune your query performance and cost", + args: { + name: "structure", + }, + }, { name: "--cli-input-json", description: @@ -465,7 +474,7 @@ const completionSpec: Fig.Spec = { { name: "query", description: - "Query is a synchronous operation that enables you to run a query against your Amazon Timestream data. Query will time out after 60 seconds. You must update the default timeout in the SDK to support a timeout of 60 seconds. See the code sample for details. Your query request will fail in the following cases: If you submit a Query request with the same client token outside of the 5-minute idempotency window. If you submit a Query request with the same client token, but change other parameters, within the 5-minute idempotency window. If the size of the row (including the query metadata) exceeds 1 MB, then the query will fail with the following error message: Query aborted as max page response size has been exceeded by the output result row If the IAM principal of the query initiator and the result reader are not the same and/or the query initiator and the result reader do not have the same query string in the query requests, the query will fail with an Invalid pagination token error", + "Query is a synchronous operation that enables you to run a query against your Amazon Timestream data. If you enabled QueryInsights, this API also returns insights and metrics related to the query that you executed. QueryInsights helps with performance tuning of your query. The maximum number of Query API requests you're allowed to make with QueryInsights enabled is 1 query per second (QPS). If you exceed this query rate, it might result in throttling. Query will time out after 60 seconds. You must update the default timeout in the SDK to support a timeout of 60 seconds. See the code sample for details. Your query request will fail in the following cases: If you submit a Query request with the same client token outside of the 5-minute idempotency window. If you submit a Query request with the same client token, but change other parameters, within the 5-minute idempotency window. If the size of the row (including the query metadata) exceeds 1 MB, then the query will fail with the following error message: Query aborted as max page response size has been exceeded by the output result row If the IAM principal of the query initiator and the result reader are not the same and/or the query initiator and the result reader do not have the same query string in the query requests, the query will fail with an Invalid pagination token error", options: [ { name: "--query-string", @@ -498,6 +507,14 @@ const completionSpec: Fig.Spec = { name: "integer", }, }, + { + name: "--query-insights", + description: + "Encapsulates settings for enabling QueryInsights. Enabling QueryInsights returns insights and metrics in addition to query results for the query that you executed. 
You can use QueryInsights to tune your query performance", + args: { + name: "structure", + }, + }, { name: "--cli-input-json", description: diff --git a/src/aws/transfer.ts b/src/aws/transfer.ts index 77b87f1fef6d..a32aa93a51a6 100644 --- a/src/aws/transfer.ts +++ b/src/aws/transfer.ts @@ -1,7 +1,7 @@ const completionSpec: Fig.Spec = { name: "transfer", description: - "Transfer Family is a fully managed service that enables the transfer of files over the File Transfer Protocol (FTP), File Transfer Protocol over SSL (FTPS), or Secure Shell (SSH) File Transfer Protocol (SFTP) directly into and out of Amazon Simple Storage Service (Amazon S3) or Amazon EFS. Additionally, you can use Applicability Statement 2 (AS2) to transfer files into and out of Amazon S3. Amazon Web Services helps you seamlessly migrate your file transfer workflows to Transfer Family by integrating with existing authentication systems, and providing DNS routing with Amazon Route 53 so nothing changes for your customers and partners, or their applications. With your data in Amazon S3, you can use it with Amazon Web Services for processing, analytics, machine learning, and archiving. Getting started with Transfer Family is easy since there is no infrastructure to buy and set up", + "Transfer Family is a fully managed service that enables the transfer of files over the File Transfer Protocol (FTP), File Transfer Protocol over SSL (FTPS), or Secure Shell (SSH) File Transfer Protocol (SFTP) directly into and out of Amazon Simple Storage Service (Amazon S3) or Amazon EFS. Additionally, you can use Applicability Statement 2 (AS2) to transfer files into and out of Amazon S3. Amazon Web Services helps you seamlessly migrate your file transfer workflows to Transfer Family by integrating with existing authentication systems, and providing DNS routing with Amazon Route 53 so nothing changes for your customers and partners, or their applications. With your data in Amazon S3, you can use it with Amazon Web Services services for processing, analytics, machine learning, and archiving. Getting started with Transfer Family is easy since there is no infrastructure to buy and set up", subcommands: [ { name: "create-access", @@ -129,7 +129,7 @@ const completionSpec: Fig.Spec = { { name: "--base-directory", description: - "The landing directory (folder) for files transferred by using the AS2 protocol. A BaseDirectory example is /DOC-EXAMPLE-BUCKET/home/mydirectory", + "The landing directory (folder) for files transferred by using the AS2 protocol. A BaseDirectory example is /amzn-s3-demo-bucket/home/mydirectory", args: { name: "string", }, @@ -1904,6 +1904,86 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "list-file-transfer-results", + description: + "Returns real-time updates and detailed information on the status of each individual file being transferred in a specific file transfer operation. You specify the file transfer by providing its ConnectorId and its TransferId. File transfer results are available up to 7 days after an operation has been requested", + options: [ + { + name: "--connector-id", + description: + "A unique identifier for a connector. This value should match the value supplied to the corresponding StartFileTransfer call", + args: { + name: "string", + }, + }, + { + name: "--transfer-id", + description: + "A unique identifier for a file transfer. 
This value should match the value supplied to the corresponding StartFileTransfer call", + args: { + name: "string", + }, + }, + { + name: "--next-token", + description: + "If there are more file details than returned in this call, use this value for a subsequent call to ListFileTransferResults to retrieve them", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "The maximum number of files to return in a single page. Note that currently you can specify a maximum of 10 file paths in a single StartFileTransfer operation. Thus, the maximum number of file transfer results that can be returned in a single page is 10", + args: { + name: "integer", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "list-host-keys", description: @@ -2481,7 +2561,7 @@ const completionSpec: Fig.Spec = { { name: "--send-file-paths", description: - "One or more source paths for the Amazon S3 storage. Each string represents a source file path for one outbound file transfer. For example, DOC-EXAMPLE-BUCKET/myfile.txt . Replace DOC-EXAMPLE-BUCKET with one of your actual buckets", + "One or more source paths for the Amazon S3 storage. Each string represents a source file path for one outbound file transfer. For example, amzn-s3-demo-bucket/myfile.txt . 
Replace amzn-s3-demo-bucket with one of your actual buckets", args: { name: "list", }, @@ -2909,7 +2989,7 @@ const completionSpec: Fig.Spec = { { name: "--base-directory", description: - "To change the landing directory (folder) for files that are transferred, provide the bucket folder that you want to use; for example, /DOC-EXAMPLE-BUCKET/home/mydirectory", + "To change the landing directory (folder) for files that are transferred, provide the bucket folder that you want to use; for example, /amzn-s3-demo-bucket/home/mydirectory", args: { name: "string", }, @@ -3191,7 +3271,7 @@ const completionSpec: Fig.Spec = { { name: "--endpoint-type", description: - "The type of endpoint that you want your server to use. You can choose to make your server's endpoint publicly accessible (PUBLIC) or host it inside your VPC. With an endpoint that is hosted in a VPC, you can restrict access to your server and resources only within your VPC or choose to make it internet facing by attaching Elastic IP addresses directly to it. After May 19, 2021, you won't be able to create a server using EndpointType=VPC_ENDPOINT in your Amazon Web Servicesaccount if your account hasn't already done so before May 19, 2021. If you have already created servers with EndpointType=VPC_ENDPOINT in your Amazon Web Servicesaccount on or before May 19, 2021, you will not be affected. After this date, use EndpointType=VPC. For more information, see https://docs.aws.amazon.com/transfer/latest/userguide/create-server-in-vpc.html#deprecate-vpc-endpoint. It is recommended that you use VPC as the EndpointType. With this endpoint type, you have the option to directly associate up to three Elastic IPv4 addresses (BYO IP included) with your server's endpoint and use VPC security groups to restrict traffic by the client's public IP address. This is not possible with EndpointType set to VPC_ENDPOINT", + "The type of endpoint that you want your server to use. You can choose to make your server's endpoint publicly accessible (PUBLIC) or host it inside your VPC. With an endpoint that is hosted in a VPC, you can restrict access to your server and resources only within your VPC or choose to make it internet facing by attaching Elastic IP addresses directly to it. After May 19, 2021, you won't be able to create a server using EndpointType=VPC_ENDPOINT in your Amazon Web Services account if your account hasn't already done so before May 19, 2021. If you have already created servers with EndpointType=VPC_ENDPOINT in your Amazon Web Services account on or before May 19, 2021, you will not be affected. After this date, use EndpointType=VPC. For more information, see https://docs.aws.amazon.com/transfer/latest/userguide/create-server-in-vpc.html#deprecate-vpc-endpoint. It is recommended that you use VPC as the EndpointType. With this endpoint type, you have the option to directly associate up to three Elastic IPv4 addresses (BYO IP included) with your server's endpoint and use VPC security groups to restrict traffic by the client's public IP address. This is not possible with EndpointType set to VPC_ENDPOINT", args: { name: "string", }, diff --git a/src/aws/wafv2.ts b/src/aws/wafv2.ts index 566921361152..2e788bd3b4c3 100644 --- a/src/aws/wafv2.ts +++ b/src/aws/wafv2.ts @@ -1,7 +1,7 @@ const completionSpec: Fig.Spec = { name: "wafv2", description: - 'WAF This is the latest version of the WAF API, released in November, 2019. 
The names of the entities that you use to access this API, like endpoints and namespaces, all have the versioning information added, like "V2" or "v2", to distinguish from the prior version. We recommend migrating your resources to this version, because it has a number of significant improvements. If you used WAF prior to this release, you can\'t use this WAFV2 API to access any WAF resources that you created before. You can access your old rules, web ACLs, and other WAF resources only through the WAF Classic APIs. The WAF Classic APIs have retained the prior names, endpoints, and namespaces. For information, including how to migrate your WAF resources to this version, see the WAF Developer Guide. WAF is a web application firewall that lets you monitor the HTTP and HTTPS requests that are forwarded to an Amazon CloudFront distribution, Amazon API Gateway REST API, Application Load Balancer, AppSync GraphQL API, Amazon Cognito user pool, App Runner service, or Amazon Web Services Verified Access instance. WAF also lets you control access to your content, to protect the Amazon Web Services resource that WAF is monitoring. Based on conditions that you specify, such as the IP addresses that requests originate from or the values of query strings, the protected resource responds to requests with either the requested content, an HTTP 403 status code (Forbidden), or with a custom response. This API guide is for developers who need detailed information about WAF API actions, data types, and errors. For detailed information about WAF features and guidance for configuring and using WAF, see the WAF Developer Guide. You can make calls using the endpoints listed in WAF endpoints and quotas. For regional applications, you can use any of the endpoints in the list. A regional application can be an Application Load Balancer (ALB), an Amazon API Gateway REST API, an AppSync GraphQL API, an Amazon Cognito user pool, an App Runner service, or an Amazon Web Services Verified Access instance. For Amazon CloudFront applications, you must use the API endpoint listed for US East (N. Virginia): us-east-1. Alternatively, you can use one of the Amazon Web Services SDKs to access an API that\'s tailored to the programming language or platform that you\'re using. For more information, see Amazon Web Services SDKs. We currently provide two versions of the WAF API: this API and the prior versions, the classic WAF APIs. This new API provides the same functionality as the older versions, with the following major improvements: You use one API for both global and regional applications. Where you need to distinguish the scope, you specify a Scope parameter and set it to CLOUDFRONT or REGIONAL. You can define a web ACL or rule group with a single call, and update it with a single call. You define all rule specifications in JSON format, and pass them to your rule group or web ACL calls. The limits WAF places on the use of rules more closely reflects the cost of running each type of rule. Rule groups include capacity settings, so you know the maximum cost of a rule group when you use it', + 'WAF This is the latest version of the WAF API, released in November, 2019. The names of the entities that you use to access this API, like endpoints and namespaces, all have the versioning information added, like "V2" or "v2", to distinguish from the prior version. We recommend migrating your resources to this version, because it has a number of significant improvements. 
If you used WAF prior to this release, you can\'t use this WAFV2 API to access any WAF resources that you created before. WAF Classic support will end on September 30, 2025. For information about WAF, including how to migrate your WAF Classic resources to this version, see the WAF Developer Guide. WAF is a web application firewall that lets you monitor the HTTP and HTTPS requests that are forwarded to an Amazon CloudFront distribution, Amazon API Gateway REST API, Application Load Balancer, AppSync GraphQL API, Amazon Cognito user pool, App Runner service, or Amazon Web Services Verified Access instance. WAF also lets you control access to your content, to protect the Amazon Web Services resource that WAF is monitoring. Based on conditions that you specify, such as the IP addresses that requests originate from or the values of query strings, the protected resource responds to requests with either the requested content, an HTTP 403 status code (Forbidden), or with a custom response. This API guide is for developers who need detailed information about WAF API actions, data types, and errors. For detailed information about WAF features and guidance for configuring and using WAF, see the WAF Developer Guide. You can make calls using the endpoints listed in WAF endpoints and quotas. For regional applications, you can use any of the endpoints in the list. A regional application can be an Application Load Balancer (ALB), an Amazon API Gateway REST API, an AppSync GraphQL API, an Amazon Cognito user pool, an App Runner service, or an Amazon Web Services Verified Access instance. For Amazon CloudFront applications, you must use the API endpoint listed for US East (N. Virginia): us-east-1. Alternatively, you can use one of the Amazon Web Services SDKs to access an API that\'s tailored to the programming language or platform that you\'re using. For more information, see Amazon Web Services SDKs', subcommands: [ { name: "associate-web-acl", @@ -507,7 +507,7 @@ const completionSpec: Fig.Spec = { { name: "delete-firewall-manager-rule-groups", description: - "Deletes all rule groups that are managed by Firewall Manager for the specified web ACL. You can only use this if ManagedByFirewallManager is false in the specified WebACL", + "Deletes all rule groups that are managed by Firewall Manager from the specified WebACL. You can only use this if ManagedByFirewallManager and RetrofittedByFirewallManager are both false in the web ACL", options: [ { name: "--web-acl-arn", @@ -791,7 +791,7 @@ const completionSpec: Fig.Spec = { { name: "delete-web-acl", description: - "Deletes the specified WebACL. You can only use this if ManagedByFirewallManager is false in the specified WebACL. Before deleting any web ACL, first disassociate it from all resources. To retrieve a list of the resources that are associated with a web ACL, use the following calls: For regional resources, call ListResourcesForWebACL. For Amazon CloudFront distributions, use the CloudFront call ListDistributionsByWebACLId. For information, see ListDistributionsByWebACLId in the Amazon CloudFront API Reference. To disassociate a resource from a web ACL, use the following calls: For regional resources, call DisassociateWebACL. For Amazon CloudFront distributions, provide an empty web ACL ID in the CloudFront call UpdateDistribution. For information, see UpdateDistribution in the Amazon CloudFront API Reference", + "Deletes the specified WebACL. You can only use this if ManagedByFirewallManager is false in the web ACL. 
Before deleting any web ACL, first disassociate it from all resources. To retrieve a list of the resources that are associated with a web ACL, use the following calls: For regional resources, call ListResourcesForWebACL. For Amazon CloudFront distributions, use the CloudFront call ListDistributionsByWebACLId. For information, see ListDistributionsByWebACLId in the Amazon CloudFront API Reference. To disassociate a resource from a web ACL, use the following calls: For regional resources, call DisassociateWebACL. For Amazon CloudFront distributions, provide an empty web ACL ID in the CloudFront call UpdateDistribution. For information, see UpdateDistribution in the Amazon CloudFront API Reference", options: [ { name: "--name",