From 77ae7ee5a9e7d36da4b932e9ac18df00a1834de2 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 25 Jul 2023 11:40:22 +0000
Subject: [PATCH] build(deps): bump github.com/tektoncd/pipeline from 0.45.0 to
 0.50.0

Bumps [github.com/tektoncd/pipeline](https://github.com/tektoncd/pipeline) from 0.45.0 to 0.50.0.
- [Release notes](https://github.com/tektoncd/pipeline/releases)
- [Changelog](https://github.com/tektoncd/pipeline/blob/main/releases.md)
- [Commits](https://github.com/tektoncd/pipeline/compare/v0.45.0...v0.50.0)

---
updated-dependencies:
- dependency-name: github.com/tektoncd/pipeline
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot]
---
 go.mod | 198 +- go.sum | 491 +- .../go/compute/internal/version.go | 2 +- vendor/cloud.google.com/go/iam/CHANGES.md | 57 + .../go/iam/apiv1/iampb/iam_policy.pb.go | 44 +- .../go/iam/apiv1/iampb/options.pb.go | 24 +- .../go/iam/apiv1/iampb/policy.pb.go | 107 +- vendor/cloud.google.com/go/iam/iam.go | 2 +- vendor/cloud.google.com/go/kms/apiv1/doc.go | 75 +- .../go/kms/apiv1/ekm_client.go | 366 +- .../go/kms/apiv1/gapic_metadata.json | 437 - vendor/cloud.google.com/go/kms/apiv1/iam.go | 2 +- .../go/kms/apiv1/key_management_client.go | 319 +- .../go/kms/apiv1/kmspb/ekm_service.pb.go | 1038 +- .../go/kms/apiv1/kmspb/resources.pb.go | 534 +- .../go/kms/apiv1/kmspb/service.pb.go | 397 +- .../go/kms/internal/version.go | 2 +- .../azure-sdk-for-go/sdk/azcore/CHANGELOG.md | 572 ++ .../azure-sdk-for-go/sdk/azcore/LICENSE.txt} | 6 +- .../azure-sdk-for-go/sdk/azcore/README.md | 39 + .../Azure/azure-sdk-for-go/sdk/azcore/ci.yml | 29 + .../sdk/azcore/cloud/cloud.go | 44 + .../azure-sdk-for-go/sdk/azcore/cloud/doc.go | 53 + .../Azure/azure-sdk-for-go/sdk/azcore/core.go | 114 + .../Azure/azure-sdk-for-go/sdk/azcore/doc.go | 257 + .../azure-sdk-for-go/sdk/azcore/errors.go | 14 + .../Azure/azure-sdk-for-go/sdk/azcore/etag.go | 48 + .../sdk/azcore/internal/exported/exported.go | 67 + .../sdk/azcore/internal/exported/pipeline.go | 97 + .../sdk/azcore/internal/exported/request.go | 182 + .../internal/exported/response_error.go | 144 + .../sdk/azcore/internal/log/log.go | 38 + .../azcore/internal/pollers/async/async.go | 159 + .../sdk/azcore/internal/pollers/body/body.go | 135 + .../sdk/azcore/internal/pollers/loc/loc.go | 119 + .../sdk/azcore/internal/pollers/op/op.go | 145 + .../sdk/azcore/internal/pollers/poller.go | 24 + .../sdk/azcore/internal/pollers/util.go | 187 + .../sdk/azcore/internal/shared/constants.go | 36 + .../sdk/azcore/internal/shared/shared.go | 103 + .../azure-sdk-for-go/sdk/azcore/log/doc.go | 10 + .../azure-sdk-for-go/sdk/azcore/log/log.go | 50 + .../azure-sdk-for-go/sdk/azcore/policy/doc.go | 10 + .../sdk/azcore/policy/policy.go | 164 + .../sdk/azcore/runtime/doc.go | 10 + .../sdk/azcore/runtime/errors.go | 19 + .../sdk/azcore/runtime/pager.go | 77 + .../sdk/azcore/runtime/pipeline.go | 66 + .../sdk/azcore/runtime/policy_api_version.go | 75 + .../sdk/azcore/runtime/policy_bearer_token.go | 116 + .../azcore/runtime/policy_body_download.go | 72 + .../sdk/azcore/runtime/policy_http_header.go | 39 + .../azcore/runtime/policy_include_response.go | 34 + .../sdk/azcore/runtime/policy_logging.go | 263 + .../sdk/azcore/runtime/policy_request_id.go | 34 + .../sdk/azcore/runtime/policy_retry.go | 262 + .../sdk/azcore/runtime/policy_telemetry.go | 79 + .../sdk/azcore/runtime/poller.go | 327 + .../sdk/azcore/runtime/request.go | 248 + 
.../sdk/azcore/runtime/response.go | 135 + .../runtime/transport_default_http_client.go | 37 + .../sdk/azcore/streaming/doc.go | 9 + .../sdk/azcore/streaming/progress.go | 75 + .../azure-sdk-for-go/sdk/azcore/to/doc.go | 9 + .../azure-sdk-for-go/sdk/azcore/to/to.go | 21 + .../sdk/azcore/tracing/constants.go | 41 + .../sdk/azcore/tracing/tracing.go | 168 + .../sdk/azidentity/CHANGELOG.md | 409 + .../sdk/azidentity/LICENSE.txt} | 12 +- .../sdk/azidentity/MIGRATION.md | 307 + .../azure-sdk-for-go/sdk/azidentity/README.md | 243 + .../sdk/azidentity/TROUBLESHOOTING.md | 205 + .../sdk/azidentity/assets.json | 6 + .../sdk/azidentity/azidentity.go | 190 + .../sdk/azidentity/azure_cli_credential.go | 180 + .../azidentity/chained_token_credential.go | 138 + .../azure-sdk-for-go/sdk/azidentity/ci.yml | 47 + .../azidentity/client_assertion_credential.go | 83 + .../client_certificate_credential.go | 172 + .../azidentity/client_secret_credential.go | 75 + .../azidentity/default_azure_credential.go | 209 + .../sdk/azidentity/device_code_credential.go | 136 + .../sdk/azidentity/environment_credential.go | 164 + .../azure-sdk-for-go/sdk/azidentity/errors.go | 129 + .../interactive_browser_credential.go | 106 + .../sdk/azidentity/logging.go | 14 + .../sdk/azidentity/managed_identity_client.go | 388 + .../azidentity/managed_identity_credential.go | 127 + .../sdk/azidentity/on_behalf_of_credential.go | 99 + .../azure-sdk-for-go/sdk/azidentity/syncer.go | 130 + .../username_password_credential.go | 81 + .../sdk/azidentity/version.go | 15 + .../sdk/azidentity/workload_identity.go | 126 + .../sdk/internal/LICENSE.txt} | 12 +- .../sdk/internal/diag/diag.go | 51 + .../azure-sdk-for-go/sdk/internal/diag/doc.go | 7 + .../sdk/internal/errorinfo/doc.go | 7 + .../sdk/internal/errorinfo/errorinfo.go | 16 + .../sdk/internal/exported/exported.go | 124 + .../azure-sdk-for-go/sdk/internal/log/doc.go | 7 + .../azure-sdk-for-go/sdk/internal/log/log.go | 104 + .../sdk/internal/poller/util.go | 155 + .../sdk/internal/temporal/resource.go | 123 + .../azure-sdk-for-go/sdk/internal/uuid/doc.go | 7 + .../sdk/internal/uuid/uuid.go | 76 + .../sdk/security/keyvault/azkeys/CHANGELOG.md | 136 + .../sdk/security/keyvault/azkeys/LICENSE.txt | 21 + .../sdk/security/keyvault/azkeys/README.md | 147 + .../keyvault/azkeys/TROUBLESHOOTING.md | 4 + .../sdk/security/keyvault/azkeys/assets.json | 6 + .../sdk/security/keyvault/azkeys/autorest.md | 229 + .../sdk/security/keyvault/azkeys/build.go | 10 + .../security/keyvault/azkeys/ci.security.yml | 34 + .../sdk/security/keyvault/azkeys/client.go | 1294 +++ .../sdk/security/keyvault/azkeys/constants.go | 214 + .../security/keyvault/azkeys/custom_client.go | 63 + .../sdk/security/keyvault/azkeys/models.go | 548 + .../security/keyvault/azkeys/models_serde.go | 1120 +++ .../keyvault/azkeys/platform-matrix.json | 17 + .../keyvault/azkeys/response_types.go | 130 + .../keyvault/azkeys/test-resources-post.ps1 | 118 + .../keyvault/azkeys/test-resources.json | 296 + .../sdk/security/keyvault/azkeys/time_unix.go | 62 + .../sdk/security/keyvault/azkeys/version.go | 12 + .../security/keyvault/internal/CHANGELOG.md | 62 + .../security/keyvault/internal/LICENSE.txt | 21 + .../sdk/security/keyvault/internal/README.md | 21 + .../keyvault/internal/challenge_policy.go | 175 + .../keyvault/internal/ci.securitykeyvault.yml | 28 + .../security/keyvault/internal/constants.go | 11 + .../sdk/security/keyvault/internal/doc.go | 7 + .../sdk/security/keyvault/internal/parse.go | 37 + .../services/keyvault/auth/auth.go | 65 - 
.../keyvault/v7.1/keyvault/CHANGELOG.md | 26 - .../services/keyvault/v7.1/keyvault/client.go | 7313 -------------- .../v7.1/keyvault/dataplane_meta.json | 11 - .../services/keyvault/v7.1/keyvault/enums.go | 231 - .../services/keyvault/v7.1/keyvault/models.go | 3611 ------- .../keyvault/v7.1/keyvault/version.go | 19 - .../Azure/azure-sdk-for-go/version/version.go | 2 +- .../Azure/go-autorest/autorest/adal/token.go | 44 +- .../Azure/go-autorest/autorest/autorest.go | 32 +- .../go-autorest/autorest/azure/auth/README.md | 152 + .../go-autorest/autorest/azure/auth/auth.go | 13 +- .../Azure/go-autorest/autorest/azure/azure.go | 2 +- .../Azure/go-autorest/autorest/to/convert.go | 152 - .../autorest/to/go_mod_tidy_hack.go | 24 - .../Azure/go-autorest/autorest/utility.go | 6 +- .../go-autorest/autorest/validation/error.go | 48 - .../autorest/validation/go_mod_tidy_hack.go | 24 - .../autorest/validation/validation.go | 406 - .../LICENSE | 21 + .../apps/cache/cache.go | 54 + .../apps/confidential/confidential.go | 685 ++ .../apps/errors/error_design.md | 111 + .../apps/errors/errors.go | 89 + .../apps/internal/base/base.go | 467 + .../internal/base/internal/storage/items.go | 200 + .../internal/storage/partitioned_storage.go | 436 + .../internal/base/internal/storage/storage.go | 517 + .../storage/test_serialized_cache.json | 56 + .../apps/internal/exported/exported.go | 34 + .../apps/internal/json/design.md | 140 + .../apps/internal/json/json.go | 184 + .../apps/internal/json/mapslice.go | 333 + .../apps/internal/json/marshal.go | 346 + .../apps/internal/json/struct.go | 290 + .../apps/internal/json/types/time/time.go | 70 + .../apps/internal/local/server.go | 177 + .../apps/internal/oauth/oauth.go | 353 + .../oauth/ops/accesstokens/accesstokens.go | 451 + .../oauth/ops/accesstokens/apptype_string.go | 25 + .../internal/oauth/ops/accesstokens/tokens.go | 335 + .../internal/oauth/ops/authority/authority.go | 552 + .../ops/authority/authorizetype_string.go | 30 + .../internal/oauth/ops/internal/comm/comm.go | 320 + .../oauth/ops/internal/comm/compress.go | 33 + .../oauth/ops/internal/grant/grant.go | 17 + .../apps/internal/oauth/ops/ops.go | 56 + .../ops/wstrust/defs/endpointtype_string.go | 25 + .../wstrust/defs/mex_document_definitions.go | 394 + .../defs/saml_assertion_definitions.go | 230 + .../oauth/ops/wstrust/defs/version_string.go | 25 + .../ops/wstrust/defs/wstrust_endpoint.go | 199 + .../ops/wstrust/defs/wstrust_mex_document.go | 159 + .../internal/oauth/ops/wstrust/wstrust.go | 136 + .../apps/internal/oauth/resolvers.go | 149 + .../apps/internal/options/options.go | 52 + .../apps/internal/shared/shared.go | 71 + .../apps/internal/version/version.go | 8 + .../apps/public/public.go | 683 ++ .../Microsoft/go-winio/.golangci.yml | 27 +- .../github.com/Microsoft/go-winio/hvsock.go | 6 +- .../Microsoft/go-winio/internal/fs/doc.go | 2 + .../Microsoft/go-winio/internal/fs/fs.go | 202 + .../go-winio/internal/fs/security.go | 12 + .../go-winio/internal/fs/zsyscall_windows.go | 64 + .../go-winio/internal/socket/socket.go | 4 +- .../go-winio/internal/stringbuffer/wstring.go | 132 + vendor/github.com/Microsoft/go-winio/pipe.go | 22 +- .../Microsoft/go-winio/zsyscall_windows.go | 19 - vendor/github.com/armon/go-metrics/.gitignore | 26 - .../github.com/armon/go-metrics/.travis.yml | 13 - vendor/github.com/armon/go-metrics/LICENSE | 20 - vendor/github.com/armon/go-metrics/README.md | 91 - .../github.com/armon/go-metrics/const_unix.go | 12 - .../armon/go-metrics/const_windows.go | 13 - 
vendor/github.com/armon/go-metrics/inmem.go | 339 - .../armon/go-metrics/inmem_endpoint.go | 162 - .../armon/go-metrics/inmem_signal.go | 117 - vendor/github.com/armon/go-metrics/metrics.go | 299 - vendor/github.com/armon/go-metrics/sink.go | 132 - vendor/github.com/armon/go-metrics/start.go | 158 - vendor/github.com/armon/go-metrics/statsd.go | 184 - .../github.com/armon/go-metrics/statsite.go | 172 - vendor/github.com/armon/go-radix/.gitignore | 22 - vendor/github.com/armon/go-radix/.travis.yml | 3 - vendor/github.com/armon/go-radix/LICENSE | 20 - vendor/github.com/armon/go-radix/README.md | 38 - vendor/github.com/armon/go-radix/radix.go | 540 - .../github.com/aws/aws-sdk-go-v2/CHANGELOG.md | 3491 ++++++- .../aws/aws-sdk-go-v2/CODE_OF_CONDUCT.md | 4 +- .../aws/aws-sdk-go-v2/CONTRIBUTING.md | 35 +- vendor/github.com/aws/aws-sdk-go-v2/DESIGN.md | 2 +- vendor/github.com/aws/aws-sdk-go-v2/Makefile | 18 +- .../github.com/aws/aws-sdk-go-v2/NOTICE.txt | 2 +- vendor/github.com/aws/aws-sdk-go-v2/README.md | 19 +- .../aws-sdk-go-v2/aws/go_module_metadata.go | 2 +- .../aws/middleware/recursion_detection.go | 94 + .../aws-sdk-go-v2/aws/protocol/query/array.go | 11 + .../aws/protocol/xml/error_utils.go | 16 +- .../aws/ratelimit/token_rate_limit.go | 4 - .../aws/aws-sdk-go-v2/aws/retry/middleware.go | 5 +- .../aws/retry/retryable_error.go | 5 + .../aws/aws-sdk-go-v2/aws/retryer.go | 2 +- .../aws/signer/internal/v4/headers.go | 1 + .../aws/aws-sdk-go-v2/config/CHANGELOG.md | 76 + .../config/go_module_metadata.go | 2 +- .../config/resolve_credentials.go | 24 +- .../aws-sdk-go-v2/credentials/CHANGELOG.md | 72 + .../credentials/ec2rolecreds/doc.go | 2 +- .../credentials/go_module_metadata.go | 2 +- .../credentials/processcreds/provider.go | 24 +- .../aws-sdk-go-v2/credentials/ssocreds/doc.go | 24 +- .../feature/ec2/imds/CHANGELOG.md | 32 + .../feature/ec2/imds/api_client.go | 10 + .../feature/ec2/imds/go_module_metadata.go | 2 +- .../feature/ec2/imds/token_provider.go | 82 +- .../internal/configsources/CHANGELOG.md | 28 + .../configsources/go_module_metadata.go | 2 +- .../internal/endpoints/v2/CHANGELOG.md | 28 + .../endpoints/v2/go_module_metadata.go | 2 +- .../aws-sdk-go-v2/internal/ini/CHANGELOG.md | 28 + .../internal/ini/go_module_metadata.go | 2 +- .../aws-sdk-go-v2/service/ecr/CHANGELOG.md | 77 + .../aws-sdk-go-v2/service/ecr/api_client.go | 2 +- .../ecr/api_op_BatchCheckLayerAvailability.go | 7 +- .../service/ecr/api_op_BatchDeleteImage.go | 11 +- .../service/ecr/api_op_BatchGetImage.go | 7 +- ...BatchGetRepositoryScanningConfiguration.go | 3 + .../service/ecr/api_op_CompleteLayerUpload.go | 3 + .../ecr/api_op_CreatePullThroughCacheRule.go | 3 + .../service/ecr/api_op_CreateRepository.go | 16 +- .../ecr/api_op_DeleteLifecyclePolicy.go | 3 + .../ecr/api_op_DeletePullThroughCacheRule.go | 3 + .../ecr/api_op_DeleteRegistryPolicy.go | 3 + .../service/ecr/api_op_DeleteRepository.go | 3 + .../ecr/api_op_DeleteRepositoryPolicy.go | 3 + .../api_op_DescribeImageReplicationStatus.go | 3 + .../ecr/api_op_DescribeImageScanFindings.go | 39 +- .../service/ecr/api_op_DescribeImages.go | 51 +- .../api_op_DescribePullThroughCacheRules.go | 5 +- .../service/ecr/api_op_DescribeRegistry.go | 3 + .../ecr/api_op_DescribeRepositories.go | 37 +- .../ecr/api_op_GetAuthorizationToken.go | 6 +- .../ecr/api_op_GetDownloadUrlForLayer.go | 13 +- .../service/ecr/api_op_GetLifecyclePolicy.go | 3 + .../ecr/api_op_GetLifecyclePolicyPreview.go | 27 +- .../service/ecr/api_op_GetRegistryPolicy.go | 3 + 
...api_op_GetRegistryScanningConfiguration.go | 3 + .../service/ecr/api_op_GetRepositoryPolicy.go | 3 + .../service/ecr/api_op_InitiateLayerUpload.go | 3 + .../service/ecr/api_op_ListImages.go | 7 +- .../service/ecr/api_op_ListTagsForResource.go | 3 + .../service/ecr/api_op_PutImage.go | 15 +- .../api_op_PutImageScanningConfiguration.go | 5 +- .../ecr/api_op_PutImageTagMutability.go | 8 +- .../service/ecr/api_op_PutLifecyclePolicy.go | 7 +- .../service/ecr/api_op_PutRegistryPolicy.go | 11 +- ...api_op_PutRegistryScanningConfiguration.go | 12 +- .../ecr/api_op_PutReplicationConfiguration.go | 8 +- .../service/ecr/api_op_SetRepositoryPolicy.go | 13 +- .../service/ecr/api_op_StartImageScan.go | 8 +- .../ecr/api_op_StartLifecyclePolicyPreview.go | 7 +- .../service/ecr/api_op_TagResource.go | 3 + .../service/ecr/api_op_UntagResource.go | 3 + .../service/ecr/api_op_UploadLayerPart.go | 9 +- .../service/ecr/deserializers.go | 492 +- .../aws/aws-sdk-go-v2/service/ecr/doc.go | 20 +- .../service/ecr/go_module_metadata.go | 2 +- .../ecr/internal/endpoints/endpoints.go | 55 + .../aws-sdk-go-v2/service/ecr/types/enums.go | 20 +- .../aws-sdk-go-v2/service/ecr/types/errors.go | 304 +- .../aws-sdk-go-v2/service/ecr/types/types.go | 69 +- .../service/ecrpublic/CHANGELOG.md | 73 + .../service/ecrpublic/api_client.go | 2 +- .../api_op_BatchCheckLayerAvailability.go | 27 +- .../ecrpublic/api_op_BatchDeleteImage.go | 25 +- .../ecrpublic/api_op_CompleteLayerUpload.go | 20 +- .../ecrpublic/api_op_CreateRepository.go | 22 +- .../ecrpublic/api_op_DeleteRepository.go | 17 +- .../api_op_DeleteRepositoryPolicy.go | 17 +- .../ecrpublic/api_op_DescribeImageTags.go | 57 +- .../ecrpublic/api_op_DescribeImages.go | 67 +- .../ecrpublic/api_op_DescribeRegistries.go | 51 +- .../ecrpublic/api_op_DescribeRepositories.go | 67 +- .../ecrpublic/api_op_GetAuthorizationToken.go | 5 +- .../api_op_GetRegistryCatalogData.go | 3 + .../api_op_GetRepositoryCatalogData.go | 9 +- .../ecrpublic/api_op_GetRepositoryPolicy.go | 17 +- .../ecrpublic/api_op_InitiateLayerUpload.go | 22 +- .../ecrpublic/api_op_ListTagsForResource.go | 7 +- .../service/ecrpublic/api_op_PutImage.go | 33 +- .../api_op_PutRegistryCatalogData.go | 5 +- .../api_op_PutRepositoryCatalogData.go | 8 +- .../ecrpublic/api_op_SetRepositoryPolicy.go | 27 +- .../service/ecrpublic/api_op_TagResource.go | 15 +- .../service/ecrpublic/api_op_UntagResource.go | 7 +- .../ecrpublic/api_op_UploadLayerPart.go | 26 +- .../service/ecrpublic/deserializers.go | 402 +- .../aws-sdk-go-v2/service/ecrpublic/doc.go | 10 +- .../service/ecrpublic/go_module_metadata.go | 2 +- .../ecrpublic/internal/endpoints/endpoints.go | 41 + .../service/ecrpublic/types/errors.go | 267 +- .../service/ecrpublic/types/types.go | 199 +- .../internal/presigned-url/CHANGELOG.md | 28 + .../presigned-url/go_module_metadata.go | 2 +- .../aws-sdk-go-v2/service/kms/CHANGELOG.md | 69 + .../aws-sdk-go-v2/service/kms/api_client.go | 2 +- .../service/kms/api_op_CancelKeyDeletion.go | 31 +- .../kms/api_op_ConnectCustomKeyStore.go | 62 +- .../service/kms/api_op_CreateAlias.go | 94 +- .../kms/api_op_CreateCustomKeyStore.go | 225 +- .../service/kms/api_op_CreateGrant.go | 199 +- .../service/kms/api_op_CreateKey.go | 420 +- .../service/kms/api_op_Decrypt.go | 196 +- .../service/kms/api_op_DeleteAlias.go | 53 +- .../kms/api_op_DeleteCustomKeyStore.go | 56 +- .../kms/api_op_DeleteImportedKeyMaterial.go | 50 +- .../kms/api_op_DescribeCustomKeyStores.go | 55 +- .../service/kms/api_op_DescribeKey.go | 132 +- 
.../service/kms/api_op_DisableKey.go | 40 +- .../service/kms/api_op_DisableKeyRotation.go | 77 +- .../kms/api_op_DisconnectCustomKeyStore.go | 38 +- .../service/kms/api_op_EnableKey.go | 28 +- .../service/kms/api_op_EnableKeyRotation.go | 82 +- .../service/kms/api_op_Encrypt.go | 145 +- .../service/kms/api_op_GenerateDataKey.go | 208 +- .../service/kms/api_op_GenerateDataKeyPair.go | 162 +- ..._op_GenerateDataKeyPairWithoutPlaintext.go | 101 +- .../api_op_GenerateDataKeyWithoutPlaintext.go | 120 +- .../service/kms/api_op_GenerateMac.go | 62 +- .../service/kms/api_op_GenerateRandom.go | 66 +- .../service/kms/api_op_GetKeyPolicy.go | 23 +- .../kms/api_op_GetKeyRotationStatus.go | 97 +- .../kms/api_op_GetParametersForImport.go | 136 +- .../service/kms/api_op_GetPublicKey.go | 116 +- .../service/kms/api_op_ImportKeyMaterial.go | 185 +- .../service/kms/api_op_ListAliases.go | 63 +- .../service/kms/api_op_ListGrants.go | 53 +- .../service/kms/api_op_ListKeyPolicies.go | 33 +- .../service/kms/api_op_ListKeys.go | 23 +- .../service/kms/api_op_ListResourceTags.go | 52 +- .../service/kms/api_op_ListRetirableGrants.go | 52 +- .../service/kms/api_op_PutKeyPolicy.go | 109 +- .../service/kms/api_op_ReEncrypt.go | 240 +- .../service/kms/api_op_ReplicateKey.go | 258 +- .../service/kms/api_op_RetireGrant.go | 52 +- .../service/kms/api_op_RevokeGrant.go | 60 +- .../service/kms/api_op_ScheduleKeyDeletion.go | 95 +- .../aws-sdk-go-v2/service/kms/api_op_Sign.go | 166 +- .../service/kms/api_op_TagResource.go | 86 +- .../service/kms/api_op_UntagResource.go | 57 +- .../service/kms/api_op_UpdateAlias.go | 94 +- .../kms/api_op_UpdateCustomKeyStore.go | 119 +- .../kms/api_op_UpdateKeyDescription.go | 36 +- .../service/kms/api_op_UpdatePrimaryRegion.go | 96 +- .../service/kms/api_op_Verify.go | 114 +- .../service/kms/api_op_VerifyMac.go | 52 +- .../service/kms/deserializers.go | 652 +- .../aws/aws-sdk-go-v2/service/kms/doc.go | 110 +- .../service/kms/go_module_metadata.go | 2 +- .../kms/internal/endpoints/endpoints.go | 63 + .../aws-sdk-go-v2/service/kms/serializers.go | 45 + .../aws-sdk-go-v2/service/kms/types/enums.go | 46 +- .../aws-sdk-go-v2/service/kms/types/errors.go | 322 +- .../aws-sdk-go-v2/service/kms/types/types.go | 485 +- .../aws-sdk-go-v2/service/sso/CHANGELOG.md | 49 + .../aws-sdk-go-v2/service/sso/api_client.go | 2 +- .../service/sso/api_op_GetRoleCredentials.go | 10 +- .../service/sso/api_op_ListAccountRoles.go | 6 +- .../service/sso/api_op_ListAccounts.go | 9 +- .../service/sso/api_op_Logout.go | 11 +- .../service/sso/deserializers.go | 48 +- .../aws/aws-sdk-go-v2/service/sso/doc.go | 9 +- .../service/sso/go_module_metadata.go | 2 +- .../sso/internal/endpoints/endpoints.go | 46 + .../aws-sdk-go-v2/service/sso/types/errors.go | 8 +- .../aws-sdk-go-v2/service/sso/types/types.go | 9 +- .../service/ssooidc/CHANGELOG.md | 49 + .../service/ssooidc/api_client.go | 2 +- .../service/ssooidc/api_op_CreateToken.go | 22 +- .../service/ssooidc/api_op_RegisterClient.go | 11 +- .../api_op_StartDeviceAuthorization.go | 18 +- .../service/ssooidc/deserializers.go | 36 +- .../aws/aws-sdk-go-v2/service/ssooidc/doc.go | 54 +- .../service/ssooidc/go_module_metadata.go | 2 +- .../ssooidc/internal/endpoints/endpoints.go | 46 + .../service/ssooidc/types/errors.go | 34 +- .../aws-sdk-go-v2/service/sts/CHANGELOG.md | 57 + .../aws-sdk-go-v2/service/sts/api_client.go | 2 +- .../service/sts/api_op_AssumeRole.go | 176 +- .../service/sts/api_op_AssumeRoleWithSAML.go | 138 +- .../sts/api_op_AssumeRoleWithWebIdentity.go | 186 +- 
.../sts/api_op_DecodeAuthorizationMessage.go | 35 +- .../service/sts/api_op_GetAccessKeyInfo.go | 16 +- .../service/sts/api_op_GetCallerIdentity.go | 21 +- .../service/sts/api_op_GetFederationToken.go | 159 +- .../service/sts/api_op_GetSessionToken.go | 118 +- .../aws/aws-sdk-go-v2/service/sts/doc.go | 9 +- .../service/sts/go_module_metadata.go | 2 +- .../sts/internal/endpoints/endpoints.go | 49 + .../aws-sdk-go-v2/service/sts/serializers.go | 9 - .../aws-sdk-go-v2/service/sts/types/errors.go | 35 +- .../aws-sdk-go-v2/service/sts/types/types.go | 32 +- .../gen-go/agent/common/v1/common.pb.go | 38 +- .../agent/metrics/v1/metrics_service.pb.go | 138 +- .../agent/metrics/v1/metrics_service.pb.gw.go | 24 +- .../metrics/v1/metrics_service_grpc.pb.go | 126 - .../gen-go/agent/trace/v1/trace_service.pb.go | 212 +- .../agent/trace/v1/trace_service.pb.gw.go | 24 +- .../agent/trace/v1/trace_service_grpc.pb.go | 200 - .../gen-go/metrics/v1/metrics.pb.go | 55 +- .../gen-go/resource/v1/resource.pb.go | 17 +- .../gen-go/trace/v1/trace.pb.go | 47 +- .../gen-go/trace/v1/trace_config.pb.go | 17 +- vendor/github.com/cespare/xxhash/v2/README.md | 31 +- .../github.com/cespare/xxhash/v2/testall.sh | 10 + vendor/github.com/cespare/xxhash/v2/xxhash.go | 47 +- .../cespare/xxhash/v2/xxhash_amd64.s | 336 +- .../cespare/xxhash/v2/xxhash_arm64.s | 183 + .../v2/{xxhash_amd64.go => xxhash_asm.go} | 2 + .../cespare/xxhash/v2/xxhash_other.go | 22 +- .../cespare/xxhash/v2/xxhash_safe.go | 1 + .../cespare/xxhash/v2/xxhash_unsafe.go | 3 +- .../pkg/token/token.go | 38 + .../github.com/cloudevents/sdk-go/v2/alias.go | 10 +- .../cloudevents/sdk-go/v2/binding/encoding.go | 5 + .../sdk-go/v2/binding/format/format.go | 22 + .../cloudevents/sdk-go/v2/binding/to_event.go | 19 + .../cloudevents/sdk-go/v2/event/extensions.go | 2 +- .../sdk-go/v2/protocol/http/message.go | 3 + .../sdk-go/v2/protocol/http/utility.go | 63 + .../containerd/containerd/version/version.go | 2 +- .../stargz-snapshotter/estargz/LICENSE | 202 + .../stargz-snapshotter/estargz/build.go | 690 ++ .../estargz/errorutil/errors.go | 40 + .../stargz-snapshotter/estargz/estargz.go | 1223 +++ .../stargz-snapshotter/estargz/gzip.go | 237 + .../stargz-snapshotter/estargz/testutil.go | 2366 +++++ .../stargz-snapshotter/estargz/types.go | 342 + .../docker/cli/cli/config/configfile/file.go | 1 - .../cli/connhelper/commandconn/commandconn.go | 2 +- .../github.com/docker/cli/cli/streams/in.go | 27 +- .../github.com/docker/cli/cli/streams/out.go | 30 +- .../docker/cli/cli/streams/stream.go | 13 +- .../distribution/reference/reference.go | 4 +- vendor/github.com/docker/docker/AUTHORS | 18 + .../docker/pkg/homedir/homedir_linux.go | 9 +- .../emicklei/go-restful/v3/CHANGES.md | 20 +- .../emicklei/go-restful/v3/README.md | 4 + .../emicklei/go-restful/v3/constants.go | 2 + .../emicklei/go-restful/v3/request.go | 5 +- .../emicklei/go-restful/v3/response.go | 3 + .../emicklei/go-restful/v3/route.go | 4 +- .../emicklei/go-restful/v3/route_builder.go | 34 +- vendor/github.com/fatih/color/LICENSE.md | 20 - vendor/github.com/fatih/color/README.md | 178 - vendor/github.com/fatih/color/color.go | 618 -- vendor/github.com/fatih/color/doc.go | 135 - .../github.com/go-jose/go-jose/v3/.gitignore | 2 + .../go-jose/go-jose/v3/.golangci.yml | 53 + .../github.com/go-jose/go-jose/v3/.travis.yml | 33 + .../go-jose/go-jose/v3/BUG-BOUNTY.md | 10 + .../go-jose/go-jose/v3/CONTRIBUTING.md | 15 + vendor/github.com/go-jose/go-jose/v3/LICENSE | 202 + .../github.com/go-jose/go-jose/v3/README.md | 122 + 
.../go-jose/go-jose/v3/asymmetric.go | 592 ++ .../go-jose/go-jose/v3/cipher/cbc_hmac.go | 196 + .../go-jose/go-jose/v3/cipher/concat_kdf.go | 75 + .../go-jose/go-jose/v3/cipher/ecdh_es.go | 86 + .../go-jose/go-jose/v3/cipher/key_wrap.go | 109 + .../github.com/go-jose/go-jose/v3/crypter.go | 544 + vendor/github.com/go-jose/go-jose/v3/doc.go | 27 + .../github.com/go-jose/go-jose/v3/encoding.go | 191 + .../go-jose/v3/json}/LICENSE | 2 +- .../go-jose/go-jose/v3/json/README.md | 13 + .../go-jose/go-jose/v3/json/decode.go | 1217 +++ .../go-jose/go-jose/v3/json/encode.go | 1197 +++ .../go-jose/go-jose/v3/json/indent.go | 141 + .../go-jose/go-jose/v3/json/scanner.go | 623 ++ .../go-jose/go-jose/v3/json/stream.go | 485 + .../go-jose/go-jose/v3/json/tags.go | 44 + vendor/github.com/go-jose/go-jose/v3/jwe.go | 295 + vendor/github.com/go-jose/go-jose/v3/jwk.go | 798 ++ vendor/github.com/go-jose/go-jose/v3/jws.go | 366 + .../go-jose/go-jose/v3}/jwt/builder.go | 8 +- .../go-jose/go-jose/v3}/jwt/claims.go | 11 +- .../go-jose/go-jose/v3}/jwt/doc.go | 0 .../go-jose/go-jose/v3}/jwt/errors.go | 22 +- .../go-jose/go-jose/v3}/jwt/jwt.go | 46 +- .../go-jose/go-jose/v3}/jwt/validation.go | 34 +- .../github.com/go-jose/go-jose/v3/opaque.go | 144 + .../github.com/go-jose/go-jose/v3/shared.go | 520 + .../github.com/go-jose/go-jose/v3/signing.go | 450 + .../go-jose/go-jose/v3/symmetric.go | 495 + vendor/github.com/go-logr/logr/.golangci.yaml | 3 - vendor/github.com/go-logr/logr/discard.go | 32 +- vendor/github.com/go-logr/logr/funcr/funcr.go | 27 +- vendor/github.com/go-logr/logr/logr.go | 166 +- .../go-openapi/jsonpointer/.travis.yml | 15 - .../go-openapi/jsonreference/.golangci.yml | 13 +- .../go-openapi/jsonreference/.travis.yml | 24 - .../jsonreference/internal/normalize_url.go | 22 +- vendor/github.com/golang-jwt/jwt/v4/README.md | 18 +- vendor/github.com/golang-jwt/jwt/v4/claims.go | 6 +- vendor/github.com/golang-jwt/jwt/v4/parser.go | 7 + vendor/github.com/golang-jwt/jwt/v4/token.go | 20 +- .../golang/protobuf/jsonpb/decode.go | 8 +- .../golang/protobuf/ptypes/empty/empty.pb.go | 62 - vendor/github.com/golang/snappy/.gitignore | 16 - vendor/github.com/golang/snappy/AUTHORS | 18 - vendor/github.com/golang/snappy/CONTRIBUTORS | 41 - vendor/github.com/golang/snappy/README | 107 - vendor/github.com/golang/snappy/decode.go | 264 - .../github.com/golang/snappy/decode_amd64.s | 490 - .../github.com/golang/snappy/decode_arm64.s | 494 - vendor/github.com/golang/snappy/decode_asm.go | 15 - .../github.com/golang/snappy/decode_other.go | 115 - vendor/github.com/golang/snappy/encode.go | 289 - .../github.com/golang/snappy/encode_amd64.s | 730 -- .../github.com/golang/snappy/encode_arm64.s | 722 -- vendor/github.com/golang/snappy/encode_asm.go | 30 - .../github.com/golang/snappy/encode_other.go | 238 - vendor/github.com/golang/snappy/snappy.go | 98 - .../internal/estargz/estargz.go | 54 + .../pkg/authn/k8schain/k8schain.go | 4 +- .../pkg/authn/keychain.go | 71 +- .../pkg/authn/kubernetes/keychain.go | 82 +- .../go-containerregistry/pkg/name/digest.go | 1 + .../go-containerregistry/pkg/name/registry.go | 6 + .../go-containerregistry/pkg/v1/config.go | 15 + .../pkg/v1/empty/README.md | 8 + .../go-containerregistry/pkg/v1/empty/doc.go | 16 + .../pkg/v1/empty/image.go | 52 + .../pkg/v1/empty/index.go | 65 + .../pkg/v1/google/keychain.go | 13 +- .../pkg/v1/google/list.go | 16 +- .../go-containerregistry/pkg/v1/hash.go | 6 +- .../go-containerregistry/pkg/v1/manifest.go | 17 +- .../pkg/v1/mutate/README.md | 56 + 
.../go-containerregistry/pkg/v1/mutate/doc.go | 16 + .../pkg/v1/mutate/image.go | 293 + .../pkg/v1/mutate/index.go | 232 + .../pkg/v1/mutate/mutate.go | 553 + .../pkg/v1/mutate/rebase.go | 144 + .../pkg/v1/partial/index.go | 80 + .../pkg/v1/partial/with.go | 35 + .../go-containerregistry/pkg/v1/platform.go | 41 + .../pkg/v1/remote/catalog.go | 131 +- .../pkg/v1/remote/delete.go | 33 +- .../pkg/v1/remote/descriptor.go | 314 +- .../pkg/v1/remote/fetcher.go | 311 + .../pkg/v1/remote/image.go | 44 +- .../pkg/v1/remote/index.go | 70 +- .../pkg/v1/remote/layer.go | 37 +- .../pkg/v1/remote/list.go | 123 +- .../pkg/v1/remote/multi_write.go | 282 +- .../pkg/v1/remote/options.go | 85 +- .../pkg/v1/remote/progress.go | 7 + .../pkg/v1/remote/puller.go | 222 + .../pkg/v1/remote/pusher.go | 559 ++ .../pkg/v1/remote/referrers.go | 117 + .../pkg/v1/remote/schema1.go | 118 + .../pkg/v1/remote/transport/error.go | 27 +- .../pkg/v1/remote/transport/retry.go | 2 +- .../pkg/v1/remote/write.go | 604 +- .../pkg/v1/stream/layer.go | 8 +- .../pkg/v1/tarball/README.md | 280 + .../pkg/v1/tarball/doc.go | 17 + .../pkg/v1/tarball/image.go | 429 + .../pkg/v1/tarball/layer.go | 354 + .../pkg/v1/tarball/write.go | 457 + .../pkg/v1/types/types.go | 25 + .../pkg/v1/zz_deepcopy_generated.go | 15 + vendor/github.com/google/s2a-go/.gitignore | 6 + .../google/s2a-go/CODE_OF_CONDUCT.md | 93 + .../github.com/google/s2a-go/CONTRIBUTING.md | 29 + vendor/github.com/google/s2a-go/LICENSE.md | 202 + vendor/github.com/google/s2a-go/README.md | 17 + .../google/s2a-go/fallback/s2a_fallback.go | 167 + .../s2a-go/internal/authinfo/authinfo.go | 119 + .../s2a-go/internal/handshaker/handshaker.go | 438 + .../internal/handshaker/service/service.go | 99 + .../proto/common_go_proto/common.pb.go | 389 + .../s2a_context_go_proto/s2a_context.pb.go | 267 + .../internal/proto/s2a_go_proto/s2a.pb.go | 1377 +++ .../proto/s2a_go_proto/s2a_grpc.pb.go | 173 + .../proto/v2/common_go_proto/common.pb.go | 367 + .../v2/s2a_context_go_proto/s2a_context.pb.go | 248 + .../internal/proto/v2/s2a_go_proto/s2a.pb.go | 2494 +++++ .../proto/v2/s2a_go_proto/s2a_grpc.pb.go | 159 + .../internal/aeadcrypter/aeadcrypter.go | 34 + .../record/internal/aeadcrypter/aesgcm.go | 70 + .../record/internal/aeadcrypter/chachapoly.go | 67 + .../record/internal/aeadcrypter/common.go | 92 + .../record/internal/halfconn/ciphersuite.go | 98 + .../record/internal/halfconn/counter.go | 60 + .../record/internal/halfconn/expander.go | 59 + .../record/internal/halfconn/halfconn.go | 193 + .../google/s2a-go/internal/record/record.go | 757 ++ .../s2a-go/internal/record/ticketsender.go | 176 + .../internal/tokenmanager/tokenmanager.go | 70 + .../google/s2a-go/internal/v2/README.md | 1 + .../internal/v2/certverifier/certverifier.go | 122 + .../testdata/client_intermediate_cert.der | Bin 0 -> 998 bytes .../testdata/client_leaf_cert.der | Bin 0 -> 1147 bytes .../testdata/client_root_cert.der | Bin 0 -> 1013 bytes .../testdata/server_intermediate_cert.der | Bin 0 -> 998 bytes .../testdata/server_leaf_cert.der | Bin 0 -> 1147 bytes .../testdata/server_root_cert.der | Bin 0 -> 1013 bytes .../internal/v2/remotesigner/remotesigner.go | 186 + .../v2/remotesigner/testdata/client_cert.der | Bin 0 -> 1013 bytes .../v2/remotesigner/testdata/client_cert.pem | 24 + .../v2/remotesigner/testdata/client_key.pem | 27 + .../v2/remotesigner/testdata/server_cert.der | Bin 0 -> 1013 bytes .../v2/remotesigner/testdata/server_cert.pem | 24 + .../v2/remotesigner/testdata/server_key.pem | 27 + 
.../google/s2a-go/internal/v2/s2av2.go | 354 + .../internal/v2/testdata/client_cert.pem | 24 + .../internal/v2/testdata/client_key.pem | 27 + .../internal/v2/testdata/server_cert.pem | 24 + .../internal/v2/testdata/server_key.pem | 27 + .../tlsconfigstore/testdata/client_cert.pem | 24 + .../v2/tlsconfigstore/testdata/client_key.pem | 27 + .../tlsconfigstore/testdata/server_cert.pem | 24 + .../v2/tlsconfigstore/testdata/server_key.pem | 27 + .../v2/tlsconfigstore/tlsconfigstore.go | 404 + vendor/github.com/google/s2a-go/s2a.go | 412 + .../github.com/google/s2a-go/s2a_options.go | 208 + vendor/github.com/google/s2a-go/s2a_utils.go | 79 + .../google/s2a-go/stream/s2a_stream.go | 34 + .../google/s2a-go/testdata/client_cert.pem | 24 + .../google/s2a-go/testdata/client_key.pem | 27 + .../google/s2a-go/testdata/server_cert.pem | 24 + .../google/s2a-go/testdata/server_key.pem | 27 + .../client/client.go | 37 +- .../client/util/util.go | 26 +- .../gax-go/v2/.release-please-manifest.json | 2 +- .../googleapis/gax-go/v2/CHANGES.md | 52 + .../googleapis/gax-go/v2/apierror/apierror.go | 25 +- .../googleapis/gax-go/v2/call_option.go | 21 + .../github.com/googleapis/gax-go/v2/header.go | 68 +- .../googleapis/gax-go/v2/internal/version.go | 2 +- .../github.com/googleapis/gax-go/v2/invoke.go | 10 + .../grpc-gateway/v2/LICENSE.txt | 27 + .../v2/internal/httprule/BUILD.bazel | 35 + .../v2/internal/httprule/compile.go | 121 + .../grpc-gateway/v2/internal/httprule/fuzz.go | 11 + .../v2/internal/httprule/parse.go | 369 + .../v2/internal/httprule/types.go | 60 + .../grpc-gateway/v2/runtime/BUILD.bazel | 97 + .../grpc-gateway/v2/runtime/context.go | 364 + .../grpc-gateway/v2/runtime/convert.go | 322 + .../grpc-gateway/v2/runtime/doc.go | 5 + .../grpc-gateway/v2/runtime/errors.go | 181 + .../grpc-gateway/v2/runtime/fieldmask.go | 165 + .../grpc-gateway/v2/runtime/handler.go | 227 + .../v2/runtime/marshal_httpbodyproto.go | 32 + .../grpc-gateway/v2/runtime/marshal_json.go | 45 + .../grpc-gateway/v2/runtime/marshal_jsonpb.go | 355 + .../grpc-gateway/v2/runtime/marshal_proto.go | 63 + .../grpc-gateway/v2/runtime/marshaler.go | 50 + .../v2/runtime/marshaler_registry.go | 109 + .../grpc-gateway/v2/runtime/mux.go | 442 + .../grpc-gateway/v2/runtime/pattern.go | 383 + .../grpc-gateway/v2/runtime/proto2_convert.go | 80 + .../grpc-gateway/v2/runtime/query.go | 342 + .../grpc-gateway/v2/utilities/BUILD.bazel | 31 + .../grpc-gateway/v2/utilities/doc.go | 2 + .../grpc-gateway/v2/utilities/pattern.go | 22 + .../v2/utilities/readerfactory.go | 20 + .../v2/utilities/string_array_flag.go | 33 + .../grpc-gateway/v2/utilities/trie.go | 174 + .../github.com/hashicorp/go-hclog/.gitignore | 1 - .../github.com/hashicorp/go-hclog/README.md | 148 - .../hashicorp/go-hclog/colorize_unix.go | 29 - .../hashicorp/go-hclog/colorize_windows.go | 38 - .../github.com/hashicorp/go-hclog/context.go | 38 - .../github.com/hashicorp/go-hclog/exclude.go | 71 - .../github.com/hashicorp/go-hclog/global.go | 64 - .../hashicorp/go-hclog/interceptlogger.go | 204 - .../hashicorp/go-hclog/intlogger.go | 911 -- .../github.com/hashicorp/go-hclog/logger.go | 373 - .../hashicorp/go-hclog/nulllogger.go | 58 - .../hashicorp/go-hclog/stacktrace.go | 109 - .../github.com/hashicorp/go-hclog/stdlog.go | 110 - .../github.com/hashicorp/go-hclog/writer.go | 82 - .../hashicorp/go-immutable-radix/.gitignore | 24 - .../hashicorp/go-immutable-radix/CHANGELOG.md | 23 - .../hashicorp/go-immutable-radix/LICENSE | 363 - .../hashicorp/go-immutable-radix/README.md | 66 - 
.../hashicorp/go-immutable-radix/edges.go | 21 - .../hashicorp/go-immutable-radix/iradix.go | 676 -- .../hashicorp/go-immutable-radix/iter.go | 205 - .../hashicorp/go-immutable-radix/node.go | 334 - .../hashicorp/go-immutable-radix/raw_iter.go | 78 - .../go-immutable-radix/reverse_iter.go | 239 - .../github.com/hashicorp/go-plugin/.gitignore | 2 - .../hashicorp/go-plugin/CHANGELOG.md | 25 - vendor/github.com/hashicorp/go-plugin/LICENSE | 355 - .../github.com/hashicorp/go-plugin/README.md | 164 - .../github.com/hashicorp/go-plugin/client.go | 1055 -- .../hashicorp/go-plugin/discover.go | 28 - .../github.com/hashicorp/go-plugin/error.go | 24 - .../hashicorp/go-plugin/grpc_broker.go | 457 - .../hashicorp/go-plugin/grpc_client.go | 126 - .../hashicorp/go-plugin/grpc_controller.go | 23 - .../hashicorp/go-plugin/grpc_server.go | 161 - .../hashicorp/go-plugin/grpc_stdio.go | 207 - .../go-plugin/internal/plugin/gen.go | 3 - .../internal/plugin/grpc_broker.pb.go | 203 - .../internal/plugin/grpc_broker.proto | 13 - .../internal/plugin/grpc_controller.pb.go | 145 - .../internal/plugin/grpc_controller.proto | 11 - .../internal/plugin/grpc_stdio.pb.go | 233 - .../internal/plugin/grpc_stdio.proto | 30 - .../hashicorp/go-plugin/log_entry.go | 73 - vendor/github.com/hashicorp/go-plugin/mtls.go | 73 - .../hashicorp/go-plugin/mux_broker.go | 204 - .../github.com/hashicorp/go-plugin/plugin.go | 58 - .../github.com/hashicorp/go-plugin/process.go | 24 - .../hashicorp/go-plugin/process_posix.go | 20 - .../hashicorp/go-plugin/process_windows.go | 30 - .../hashicorp/go-plugin/protocol.go | 45 - .../hashicorp/go-plugin/rpc_client.go | 170 - .../hashicorp/go-plugin/rpc_server.go | 206 - .../github.com/hashicorp/go-plugin/server.go | 591 -- .../hashicorp/go-plugin/server_mux.go | 31 - .../github.com/hashicorp/go-plugin/stream.go | 18 - .../github.com/hashicorp/go-plugin/testing.go | 180 - .../hashicorp/go-retryablehttp/README.md | 19 - .../hashicorp/go-retryablehttp/client.go | 11 +- .../hashicorp/go-secure-stdlib/mlock/LICENSE | 363 - .../hashicorp/go-secure-stdlib/mlock/mlock.go | 15 - .../go-secure-stdlib/mlock/mlock_unavail.go | 13 - .../go-secure-stdlib/mlock/mlock_unix.go | 18 - .../github.com/hashicorp/go-uuid/.travis.yml | 12 - vendor/github.com/hashicorp/go-uuid/LICENSE | 365 - vendor/github.com/hashicorp/go-uuid/README.md | 8 - vendor/github.com/hashicorp/go-uuid/uuid.go | 83 - .../hashicorp/go-version/CHANGELOG.md | 45 - .../github.com/hashicorp/go-version/LICENSE | 354 - .../github.com/hashicorp/go-version/README.md | 66 - .../hashicorp/go-version/constraint.go | 296 - .../hashicorp/go-version/version.go | 407 - .../go-version/version_collection.go | 17 - vendor/github.com/hashicorp/vault/api/auth.go | 5 +- .../hashicorp/vault/api/auth_token.go | 3 + .../github.com/hashicorp/vault/api/client.go | 78 +- vendor/github.com/hashicorp/vault/api/help.go | 3 + vendor/github.com/hashicorp/vault/api/kv.go | 3 + .../github.com/hashicorp/vault/api/kv_v1.go | 3 + .../github.com/hashicorp/vault/api/kv_v2.go | 3 + .../hashicorp/vault/api/lifetime_watcher.go | 53 +- .../github.com/hashicorp/vault/api/logical.go | 71 +- .../hashicorp/vault/api/output_policy.go | 35 +- .../hashicorp/vault/api/output_string.go | 3 + .../hashicorp/vault/api/plugin_helpers.go | 16 +- .../helper/consts => api}/plugin_types.go | 9 +- .../github.com/hashicorp/vault/api/request.go | 7 +- .../hashicorp/vault/api/response.go | 17 +- .../github.com/hashicorp/vault/api/secret.go | 79 +- vendor/github.com/hashicorp/vault/api/ssh.go | 3 + 
.../hashicorp/vault/api/ssh_agent.go | 33 +- vendor/github.com/hashicorp/vault/api/sys.go | 3 + .../hashicorp/vault/api/sys_audit.go | 3 + .../hashicorp/vault/api/sys_auth.go | 3 + .../hashicorp/vault/api/sys_capabilities.go | 3 + .../hashicorp/vault/api/sys_config_cors.go | 3 + .../hashicorp/vault/api/sys_generate_root.go | 3 + .../hashicorp/vault/api/sys_hastatus.go | 3 + .../hashicorp/vault/api/sys_health.go | 3 + .../hashicorp/vault/api/sys_init.go | 3 + .../hashicorp/vault/api/sys_leader.go | 3 + .../hashicorp/vault/api/sys_leases.go | 3 + .../github.com/hashicorp/vault/api/sys_mfa.go | 3 + .../hashicorp/vault/api/sys_monitor.go | 7 +- .../hashicorp/vault/api/sys_mounts.go | 67 +- .../hashicorp/vault/api/sys_plugins.go | 28 +- .../hashicorp/vault/api/sys_policy.go | 3 + .../hashicorp/vault/api/sys_raft.go | 3 + .../hashicorp/vault/api/sys_rekey.go | 3 + .../hashicorp/vault/api/sys_rotate.go | 3 + .../hashicorp/vault/api/sys_seal.go | 36 +- .../hashicorp/vault/api/sys_stepdown.go | 3 + vendor/github.com/hashicorp/vault/sdk/LICENSE | 365 - .../vault/sdk/helper/certutil/helpers.go | 1386 --- .../vault/sdk/helper/certutil/types.go | 1015 -- .../vault/sdk/helper/compressutil/compress.go | 222 - .../vault/sdk/helper/consts/agent.go | 12 - .../vault/sdk/helper/consts/consts.go | 37 - .../sdk/helper/consts/deprecation_status.go | 31 - .../vault/sdk/helper/consts/error.go | 25 - .../vault/sdk/helper/consts/replication.go | 159 - .../vault/sdk/helper/consts/token_consts.go | 10 - .../vault/sdk/helper/cryptoutil/cryptoutil.go | 11 - .../vault/sdk/helper/errutil/error.go | 20 - .../hashicorp/vault/sdk/helper/hclutil/hcl.go | 36 - .../vault/sdk/helper/jsonutil/json.go | 100 - .../vault/sdk/helper/license/feature.go | 10 - .../vault/sdk/helper/locksutil/locks.go | 58 - .../vault/sdk/helper/logging/logging.go | 81 - .../sdk/helper/pathmanager/pathmanager.go | 136 - .../vault/sdk/helper/pluginutil/env.go | 77 - .../sdk/helper/pluginutil/multiplexing.go | 80 - .../sdk/helper/pluginutil/multiplexing.pb.go | 213 - .../sdk/helper/pluginutil/multiplexing.proto | 13 - .../helper/pluginutil/multiplexing_grpc.pb.go | 101 - .../vault/sdk/helper/pluginutil/run_config.go | 179 - .../vault/sdk/helper/pluginutil/runner.go | 115 - .../vault/sdk/helper/pluginutil/tls.go | 106 - .../vault/sdk/helper/strutil/strutil.go | 94 - .../vault/sdk/helper/wrapping/wrapinfo.go | 37 - .../hashicorp/vault/sdk/logical/audit.go | 19 - .../hashicorp/vault/sdk/logical/auth.go | 129 - .../hashicorp/vault/sdk/logical/connection.go | 18 - .../vault/sdk/logical/controlgroup.go | 17 - .../hashicorp/vault/sdk/logical/error.go | 122 - .../vault/sdk/logical/identity.pb.go | 709 -- .../vault/sdk/logical/identity.proto | 91 - .../hashicorp/vault/sdk/logical/lease.go | 53 - .../hashicorp/vault/sdk/logical/logical.go | 156 - .../vault/sdk/logical/logical_storage.go | 52 - .../vault/sdk/logical/managed_key.go | 97 - .../hashicorp/vault/sdk/logical/plugin.pb.go | 171 - .../hashicorp/vault/sdk/logical/plugin.proto | 16 - .../hashicorp/vault/sdk/logical/request.go | 394 - .../hashicorp/vault/sdk/logical/response.go | 322 - .../vault/sdk/logical/response_util.go | 204 - .../hashicorp/vault/sdk/logical/secret.go | 30 - .../hashicorp/vault/sdk/logical/storage.go | 158 - .../vault/sdk/logical/storage_inmem.go | 87 - .../vault/sdk/logical/storage_view.go | 110 - .../vault/sdk/logical/system_view.go | 235 - .../hashicorp/vault/sdk/logical/testing.go | 87 - .../hashicorp/vault/sdk/logical/token.go | 304 - .../vault/sdk/logical/translate_response.go | 161 
- .../hashicorp/vault/sdk/logical/version.pb.go | 204 - .../hashicorp/vault/sdk/logical/version.proto | 17 - .../vault/sdk/logical/version_grpc.pb.go | 103 - .../hashicorp/vault/sdk/physical/cache.go | 260 - .../hashicorp/vault/sdk/physical/encoding.go | 108 - .../hashicorp/vault/sdk/physical/entry.go | 20 - .../hashicorp/vault/sdk/physical/error.go | 110 - .../vault/sdk/physical/inmem/inmem.go | 310 - .../vault/sdk/physical/inmem/inmem_ha.go | 167 - .../hashicorp/vault/sdk/physical/latency.go | 113 - .../hashicorp/vault/sdk/physical/physical.go | 134 - .../vault/sdk/physical/physical_access.go | 40 - .../vault/sdk/physical/physical_view.go | 94 - .../hashicorp/vault/sdk/physical/testing.go | 497 - .../vault/sdk/physical/transactions.go | 150 - .../hashicorp/vault/sdk/version/cgo.go | 7 - .../hashicorp/vault/sdk/version/version.go | 80 - .../vault/sdk/version/version_base.go | 17 - vendor/github.com/hashicorp/yamux/.gitignore | 23 - vendor/github.com/hashicorp/yamux/LICENSE | 362 - vendor/github.com/hashicorp/yamux/README.md | 86 - vendor/github.com/hashicorp/yamux/addr.go | 60 - vendor/github.com/hashicorp/yamux/const.go | 182 - vendor/github.com/hashicorp/yamux/mux.go | 114 - vendor/github.com/hashicorp/yamux/session.go | 732 -- vendor/github.com/hashicorp/yamux/spec.md | 140 - vendor/github.com/hashicorp/yamux/stream.go | 544 - vendor/github.com/hashicorp/yamux/util.go | 43 - .../inconshreveable/mousetrap/trap_others.go | 1 + .../inconshreveable/mousetrap/trap_windows.go | 88 +- .../mousetrap/trap_windows_1.4.go | 46 - .../jellydator/ttlcache/v2/Readme.md | 145 - .../jellydator/ttlcache/v2/cache.go | 605 -- .../ttlcache/v2/evictionreason_enumer.go | 52 - .../github.com/jellydator/ttlcache/v2/item.go | 46 - .../jellydator/ttlcache/v2/metrics.go | 15 - .../jellydator/ttlcache/v2/priority_queue.go | 85 - .../ttlcache/{v2 => v3}/CHANGELOG.md | 0 .../jellydator/ttlcache/{v2 => v3}/LICENSE | 0 .../jellydator/ttlcache/v3/README.md | 134 + .../jellydator/ttlcache/v3/cache.go | 587 ++ .../ttlcache/v3/expiration_queue.go | 85 + .../github.com/jellydator/ttlcache/v3/item.go | 130 + .../jellydator/ttlcache/v3/metrics.go | 21 + .../jellydator/ttlcache/v3/options.go | 67 + .../klauspost/compress/.goreleaser.yml | 2 +- .../github.com/klauspost/compress/README.md | 49 +- .../klauspost/compress/fse/compress.go | 31 +- .../klauspost/compress/fse/decompress.go | 4 +- .../klauspost/compress/huff0/bitreader.go | 8 +- .../klauspost/compress/huff0/bitwriter.go | 16 + .../klauspost/compress/huff0/compress.go | 117 +- .../klauspost/compress/huff0/decompress.go | 2 +- .../compress/huff0/decompress_amd64.s | 584 +- .../compress/internal/snapref/encode_other.go | 22 + .../klauspost/compress/zstd/blockdec.go | 18 +- .../klauspost/compress/zstd/blockenc.go | 9 +- .../klauspost/compress/zstd/bytebuf.go | 4 +- .../klauspost/compress/zstd/decodeheader.go | 9 +- .../klauspost/compress/zstd/decoder.go | 100 +- .../compress/zstd/decoder_options.go | 26 +- .../klauspost/compress/zstd/dict.go | 51 +- .../klauspost/compress/zstd/enc_base.go | 28 +- .../klauspost/compress/zstd/enc_best.go | 265 +- .../klauspost/compress/zstd/enc_better.go | 12 +- .../klauspost/compress/zstd/enc_dfast.go | 16 +- .../klauspost/compress/zstd/enc_fast.go | 12 +- .../klauspost/compress/zstd/encoder.go | 113 +- .../compress/zstd/encoder_options.go | 40 +- .../klauspost/compress/zstd/framedec.go | 82 +- .../compress/zstd/internal/xxhash/README.md | 49 +- .../compress/zstd/internal/xxhash/xxhash.go | 47 +- .../zstd/internal/xxhash/xxhash_amd64.s | 
336 +- .../zstd/internal/xxhash/xxhash_arm64.s | 140 +- .../zstd/internal/xxhash/xxhash_asm.go | 2 +- .../zstd/internal/xxhash/xxhash_other.go | 19 +- .../klauspost/compress/zstd/seqdec.go | 11 +- .../klauspost/compress/zstd/seqdec_amd64.go | 17 +- .../klauspost/compress/zstd/seqdec_amd64.s | 136 +- .../klauspost/compress/zstd/zstd.go | 35 +- vendor/github.com/kylelemons/godebug/LICENSE | 202 + .../kylelemons/godebug/diff/diff.go | 186 + .../kylelemons/godebug/pretty/.gitignore | 5 + .../kylelemons/godebug/pretty/doc.go | 25 + .../kylelemons/godebug/pretty/public.go | 188 + .../kylelemons/godebug/pretty/reflect.go | 241 + .../kylelemons/godebug/pretty/structure.go | 223 + .../github.com/mattn/go-colorable/README.md | 48 - .../mattn/go-colorable/colorable_appengine.go | 38 - .../mattn/go-colorable/colorable_others.go | 38 - .../mattn/go-colorable/colorable_windows.go | 1047 -- .../github.com/mattn/go-colorable/go.test.sh | 12 - .../mattn/go-colorable/noncolorable.go | 57 - vendor/github.com/mattn/go-isatty/LICENSE | 9 - vendor/github.com/mattn/go-isatty/README.md | 50 - vendor/github.com/mattn/go-isatty/doc.go | 2 - vendor/github.com/mattn/go-isatty/go.test.sh | 12 - .../github.com/mattn/go-isatty/isatty_bsd.go | 19 - .../mattn/go-isatty/isatty_others.go | 16 - .../mattn/go-isatty/isatty_plan9.go | 23 - .../mattn/go-isatty/isatty_solaris.go | 21 - .../mattn/go-isatty/isatty_tcgets.go | 19 - .../mattn/go-isatty/isatty_windows.go | 125 - .../mitchellh/copystructure/README.md | 21 - .../mitchellh/copystructure/copier_time.go | 15 - .../mitchellh/copystructure/copystructure.go | 631 -- .../go-testing-interface/.travis.yml | 12 - .../mitchellh/go-testing-interface/README.md | 60 - .../mitchellh/go-testing-interface/testing.go | 112 - .../mitchellh/reflectwalk/.travis.yml | 1 - .../github.com/mitchellh/reflectwalk/LICENSE | 21 - .../mitchellh/reflectwalk/README.md | 6 - .../mitchellh/reflectwalk/location.go | 19 - .../mitchellh/reflectwalk/location_string.go | 16 - .../mitchellh/reflectwalk/reflectwalk.go | 420 - vendor/github.com/oklog/run/.gitignore | 14 - vendor/github.com/oklog/run/README.md | 75 - vendor/github.com/oklog/run/actors.go | 38 - vendor/github.com/oklog/run/group.go | 62 - .../image-spec/specs-go/v1/annotations.go | 9 - .../image-spec/specs-go/v1/artifact.go | 34 - .../image-spec/specs-go/v1/config.go | 29 +- .../image-spec/specs-go/v1/descriptor.go | 10 +- .../image-spec/specs-go/v1/index.go | 6 + .../image-spec/specs-go/v1/manifest.go | 3 + .../image-spec/specs-go/v1/mediatype.go | 19 +- .../image-spec/specs-go/version.go | 2 +- vendor/github.com/pierrec/lz4/.gitignore | 34 - vendor/github.com/pierrec/lz4/.travis.yml | 24 - vendor/github.com/pierrec/lz4/README.md | 90 - vendor/github.com/pierrec/lz4/block.go | 413 - vendor/github.com/pierrec/lz4/debug.go | 23 - vendor/github.com/pierrec/lz4/debug_stub.go | 7 - vendor/github.com/pierrec/lz4/decode_amd64.go | 8 - vendor/github.com/pierrec/lz4/decode_amd64.s | 375 - vendor/github.com/pierrec/lz4/decode_other.go | 98 - vendor/github.com/pierrec/lz4/errors.go | 30 - .../pierrec/lz4/internal/xxh32/xxh32zero.go | 223 - vendor/github.com/pierrec/lz4/lz4.go | 116 - vendor/github.com/pierrec/lz4/lz4_go1.10.go | 29 - .../github.com/pierrec/lz4/lz4_notgo1.10.go | 29 - vendor/github.com/pierrec/lz4/reader.go | 335 - .../github.com/pierrec/lz4/reader_legacy.go | 207 - vendor/github.com/pierrec/lz4/writer.go | 422 - .../github.com/pierrec/lz4/writer_legacy.go | 182 - .../{pierrec/lz4 => pkg/browser}/LICENSE | 7 +- 
vendor/github.com/pkg/browser/README.md | 55 + vendor/github.com/pkg/browser/browser.go | 57 + .../github.com/pkg/browser/browser_darwin.go | 5 + .../github.com/pkg/browser/browser_freebsd.go | 14 + .../github.com/pkg/browser/browser_linux.go | 21 + .../github.com/pkg/browser/browser_netbsd.go | 14 + .../github.com/pkg/browser/browser_openbsd.go | 14 + .../pkg/browser/browser_unsupported.go | 12 + .../github.com/pkg/browser/browser_windows.go | 7 + .../sigstore/pkg/cryptoutils/publickey.go | 2 +- .../sigstore/pkg/signature/kms/aws/LICENSE | 202 + .../sigstore/pkg/signature/kms/aws/client.go | 70 +- .../sigstore/pkg/signature/kms/aws/signer.go | 6 +- .../sigstore/pkg/signature/kms/azure/LICENSE | 202 + .../pkg/signature/kms/azure/client.go | 282 +- .../pkg/signature/kms/azure/signer.go | 35 +- .../sigstore/pkg/signature/kms/gcp/LICENSE | 202 + .../sigstore/pkg/signature/kms/gcp/client.go | 66 +- .../pkg/signature/kms/hashivault/LICENSE | 202 + .../pkg/signature/kms/hashivault/client.go | 40 +- .../sigstore/pkg/signature/options/noop.go | 16 +- .../sigstore/pkg/signature/payload/payload.go | 23 +- vendor/github.com/sirupsen/logrus/README.md | 8 +- vendor/github.com/sirupsen/logrus/writer.go | 34 +- vendor/github.com/spf13/cobra/.golangci.yml | 2 +- vendor/github.com/spf13/cobra/Makefile | 8 +- vendor/github.com/spf13/cobra/README.md | 4 +- vendor/github.com/spf13/cobra/active_help.go | 2 +- vendor/github.com/spf13/cobra/args.go | 4 +- .../spf13/cobra/bash_completions.go | 4 +- .../spf13/cobra/bash_completionsV2.go | 71 +- vendor/github.com/spf13/cobra/cobra.go | 6 +- vendor/github.com/spf13/cobra/command.go | 54 +- .../github.com/spf13/cobra/command_notwin.go | 2 +- vendor/github.com/spf13/cobra/command_win.go | 2 +- vendor/github.com/spf13/cobra/completions.go | 13 +- .../spf13/cobra/fish_completions.go | 78 +- vendor/github.com/spf13/cobra/flag_groups.go | 2 +- .../spf13/cobra/powershell_completions.go | 27 +- .../spf13/cobra/projects_using_cobra.md | 6 +- .../spf13/cobra/shell_completions.go | 2 +- .../spf13/cobra/shell_completions.md | 30 +- vendor/github.com/spf13/cobra/user_guide.md | 37 +- .../github.com/spf13/cobra/zsh_completions.go | 17 +- .../run => spiffe/go-spiffe/v2}/LICENSE | 0 .../go-spiffe/v2/bundle/jwtbundle/bundle.go | 201 + .../go-spiffe/v2/bundle/jwtbundle/doc.go | 43 + .../go-spiffe/v2/bundle/jwtbundle/set.go | 105 + .../go-spiffe/v2/bundle/jwtbundle/source.go | 12 + .../v2/bundle/spiffebundle/bundle.go | 486 + .../go-spiffe/v2/bundle/spiffebundle/doc.go | 59 + .../go-spiffe/v2/bundle/spiffebundle/set.go | 135 + .../v2/bundle/spiffebundle/source.go | 10 + .../go-spiffe/v2/bundle/x509bundle/bundle.go | 200 + .../go-spiffe/v2/bundle/x509bundle/doc.go | 42 + .../go-spiffe/v2/bundle/x509bundle/set.go | 105 + .../go-spiffe/v2/bundle/x509bundle/source.go | 12 + .../go-spiffe/v2/internal/cryptoutil/keys.go | 29 + .../go-spiffe/v2/internal/jwtutil/util.go | 34 + .../go-spiffe/v2/internal/pemutil/pem.go | 123 + .../go-spiffe/v2/internal/x509util/util.go | 53 + .../spiffe/go-spiffe/v2/logger/logger.go | 9 + .../spiffe/go-spiffe/v2/logger/null.go | 12 + .../spiffe/go-spiffe/v2/logger/std.go | 24 + .../spiffe/go-spiffe/v2/logger/writer.go | 31 + .../v2/proto/spiffe/workload/workload.pb.go | 1043 ++ .../v2/proto/spiffe/workload/workload.proto | 166 + .../proto/spiffe/workload/workload_grpc.pb.go | 355 + .../v2/spiffeid/charset_backcompat_allow.go | 42 + .../v2/spiffeid/charset_backcompat_deny.go | 12 + .../spiffe/go-spiffe/v2/spiffeid/errors.go | 15 + 
.../spiffe/go-spiffe/v2/spiffeid/id.go | 258 + .../spiffe/go-spiffe/v2/spiffeid/match.go | 47 + .../spiffe/go-spiffe/v2/spiffeid/path.go | 107 + .../spiffe/go-spiffe/v2/spiffeid/require.go | 103 + .../go-spiffe/v2/spiffeid/trustdomain.go | 122 + .../v2/spiffetls/tlsconfig/authorizer.go | 40 + .../v2/spiffetls/tlsconfig/config.go | 254 + .../go-spiffe/v2/spiffetls/tlsconfig/trace.go | 22 + .../go-spiffe/v2/svid/jwtsvid/source.go | 21 + .../spiffe/go-spiffe/v2/svid/jwtsvid/svid.go | 170 + .../go-spiffe/v2/svid/x509svid/source.go | 7 + .../spiffe/go-spiffe/v2/svid/x509svid/svid.go | 243 + .../go-spiffe/v2/svid/x509svid/verify.go | 113 + .../spiffe/go-spiffe/v2/workloadapi/addr.go | 69 + .../go-spiffe/v2/workloadapi/addr_posix.go | 34 + .../go-spiffe/v2/workloadapi/addr_windows.go | 33 + .../go-spiffe/v2/workloadapi/backoff.go | 34 + .../go-spiffe/v2/workloadapi/bundlesource.go | 188 + .../spiffe/go-spiffe/v2/workloadapi/client.go | 568 ++ .../go-spiffe/v2/workloadapi/client_posix.go | 29 + .../v2/workloadapi/client_windows.go | 57 + .../go-spiffe/v2/workloadapi/convenience.go | 124 + .../go-spiffe/v2/workloadapi/jwtsource.go | 109 + .../spiffe/go-spiffe/v2/workloadapi/option.go | 147 + .../v2/workloadapi/option_windows.go | 12 + .../go-spiffe/v2/workloadapi/watcher.go | 191 + .../go-spiffe/v2/workloadapi/x509context.go | 23 + .../go-spiffe/v2/workloadapi/x509source.go | 124 + .../to => spiffe/spire-api-sdk}/LICENSE | 14 +- .../spire/api/server/entry/v1/entry.pb.go | 1490 +++ .../spire/api/server/entry/v1/entry.proto | 178 + .../api/server/entry/v1/entry_grpc.pb.go | 358 + .../proto/spire/api/types/agent.pb.go | 353 + .../proto/spire/api/types/agent.proto | 50 + .../proto/spire/api/types/attestation.pb.go | 159 + .../proto/spire/api/types/attestation.proto | 12 + .../proto/spire/api/types/bundle.pb.go | 448 + .../proto/spire/api/types/bundle.proto | 52 + .../proto/spire/api/types/entry.pb.go | 536 + .../proto/spire/api/types/entry.proto | 112 + .../proto/spire/api/types/federateswith.pb.go | 271 + .../proto/spire/api/types/federateswith.proto | 68 + .../api/types/federationrelationship.pb.go | 464 + .../api/types/federationrelationship.proto | 59 + .../proto/spire/api/types/jointoken.pb.go | 158 + .../proto/spire/api/types/jointoken.proto | 11 + .../proto/spire/api/types/jwtsvid.pb.go | 200 + .../proto/spire/api/types/jwtsvid.proto | 27 + .../proto/spire/api/types/selector.pb.go | 346 + .../proto/spire/api/types/selector.proto | 78 + .../proto/spire/api/types/spiffeid.pb.go | 162 + .../proto/spire/api/types/spiffeid.proto | 15 + .../proto/spire/api/types/status.pb.go | 300 + .../proto/spire/api/types/status.proto | 29 + .../proto/spire/api/types/x509svid.pb.go | 191 + .../proto/spire/api/types/x509svid.proto | 25 + .../sidecarlogresults/sidecarlogresults.go | 22 +- .../pkg/apis/config/artifact_bucket.go | 112 - .../pipeline/pkg/apis/config/artifact_pvc.go | 86 - .../pipeline/pkg/apis/config/contexts.go | 48 - .../pipeline/pkg/apis/config/default.go | 15 +- .../pipeline/pkg/apis/config/events.go | 185 + .../pipeline/pkg/apis/config/feature_flags.go | 221 +- .../pipeline/pkg/apis/config/metrics.go | 3 + .../pipeline/pkg/apis/config/spire_config.go | 3 + .../pipeline/pkg/apis/config/store.go | 80 +- .../pkg/apis/config/trusted_resources.go | 72 - .../pkg/apis/config/zz_generated.deepcopy.go | 71 +- .../pipeline/pkg/apis/pipeline/constant.go | 22 + .../pipeline/pkg/apis/pipeline/controller.go | 2 - .../pipeline/pkg/apis/pipeline/images.go | 9 - .../pipeline/pkg/apis/pipeline/register.go | 9 +- 
.../pkg/apis/pipeline/v1/matrix_types.go | 350 + .../pipeline/pkg/apis/pipeline/v1/merge.go | 60 + .../pkg/apis/pipeline/v1/openapi_generated.go | 280 +- .../pkg/apis/pipeline/v1/param_types.go | 365 +- .../pkg/apis/pipeline/v1/pipeline_defaults.go | 31 +- .../pkg/apis/pipeline/v1/pipeline_types.go | 315 +- .../apis/pipeline/v1/pipeline_validation.go | 470 +- .../pipeline/v1/pipelineref_validation.go | 2 +- .../apis/pipeline/v1/pipelinerun_defaults.go | 24 +- .../pkg/apis/pipeline/v1/pipelinerun_types.go | 48 +- .../pipeline/v1/pipelinerun_validation.go | 77 +- .../pkg/apis/pipeline/v1/provenance.go | 29 +- .../pkg/apis/pipeline/v1/resolver_types.go | 2 +- .../pkg/apis/pipeline/v1/result_types.go | 2 +- .../pkg/apis/pipeline/v1/result_validation.go | 14 +- .../pkg/apis/pipeline/v1/resultref.go | 19 +- .../pkg/apis/pipeline/v1/swagger.json | 160 +- .../pkg/apis/pipeline/v1/task_types.go | 7 +- .../pkg/apis/pipeline/v1/task_validation.go | 242 +- .../pkg/apis/pipeline/v1/taskref_types.go | 15 +- .../apis/pipeline/v1/taskref_validation.go | 16 +- .../pkg/apis/pipeline/v1/taskrun_defaults.go | 17 +- .../pkg/apis/pipeline/v1/taskrun_types.go | 17 +- .../apis/pipeline/v1/taskrun_validation.go | 13 +- .../pkg/apis/pipeline/v1/when_types.go | 4 +- .../pkg/apis/pipeline/v1/workspace_types.go | 1 + .../apis/pipeline/v1/workspace_validation.go | 12 - .../apis/pipeline/v1/zz_generated.deepcopy.go | 202 +- .../pkg/apis/pipeline/v1alpha1/run_types.go | 4 +- .../v1alpha1/verificationpolicy_types.go | 14 + .../v1alpha1/verificationpolicy_validation.go | 3 + .../v1alpha1/zz_generated.deepcopy.go | 2 +- .../pipeline/v1beta1/cluster_task_types.go | 4 +- .../v1beta1/cluster_task_validation.go | 7 +- .../pipeline/v1beta1/container_conversion.go | 20 +- .../apis/pipeline/v1beta1/container_types.go | 93 +- .../apis/pipeline/v1beta1/customrun_types.go | 11 +- .../pkg/apis/pipeline/v1beta1/matrix_types.go | 350 + .../pipeline/v1beta1/openapi_generated.go | 718 +- .../apis/pipeline/v1beta1/param_conversion.go | 19 +- .../pkg/apis/pipeline/v1beta1/param_types.go | 353 +- .../pipeline/v1beta1/pipeline_conversion.go | 90 +- .../pipeline/v1beta1/pipeline_defaults.go | 31 +- .../pipeline/v1beta1/pipeline_interface.go | 2 +- .../apis/pipeline/v1beta1/pipeline_types.go | 409 +- .../pipeline/v1beta1/pipeline_validation.go | 546 +- .../v1beta1/pipelineref_conversion.go | 20 +- .../pipeline/v1beta1/pipelineref_types.go | 1 + .../v1beta1/pipelineref_validation.go | 2 +- .../v1beta1/pipelinerun_conversion.go | 57 +- .../pipeline/v1beta1/pipelinerun_defaults.go | 19 +- .../pipeline/v1beta1/pipelinerun_types.go | 73 +- .../v1beta1/pipelinerun_validation.go | 14 +- .../pkg/apis/pipeline/v1beta1/provenance.go | 46 +- .../pipeline/v1beta1/provenance_conversion.go | 20 +- .../pipeline/v1beta1/resolver_conversion.go | 18 +- .../apis/pipeline/v1beta1/resolver_types.go | 2 +- .../apis/pipeline/v1beta1/resource_paths.go | 40 - .../apis/pipeline/v1beta1/resource_types.go | 250 +- .../v1beta1/resource_types_validation.go | 101 - .../pipeline/v1beta1/result_conversion.go | 36 +- .../pkg/apis/pipeline/v1beta1/result_types.go | 2 +- .../pipeline/v1beta1/result_validation.go | 6 - .../pkg/apis/pipeline/v1beta1/resultref.go | 8 +- .../pkg/apis/pipeline/v1beta1/swagger.json | 463 +- .../apis/pipeline/v1beta1/task_conversion.go | 193 +- .../pkg/apis/pipeline/v1beta1/task_types.go | 61 +- .../apis/pipeline/v1beta1/task_validation.go | 234 +- .../pipeline/v1beta1/taskref_conversion.go | 21 +- .../apis/pipeline/v1beta1/taskref_types.go | 14 +- 
.../pipeline/v1beta1/taskref_validation.go | 24 +- .../pipeline/v1beta1/taskrun_conversion.go | 84 +- .../apis/pipeline/v1beta1/taskrun_defaults.go | 17 +- .../apis/pipeline/v1beta1/taskrun_types.go | 42 +- .../pipeline/v1beta1/taskrun_validation.go | 16 +- .../pkg/apis/pipeline/v1beta1/when_types.go | 4 +- .../pipeline/v1beta1/workspace_conversion.go | 19 +- .../apis/pipeline/v1beta1/workspace_types.go | 1 + .../pipeline/v1beta1/zz_generated.deepcopy.go | 239 +- .../v1alpha1/resolution_request_conversion.go | 55 +- .../v1alpha1/resolution_request_types.go | 6 +- .../v1alpha1/zz_generated.deepcopy.go | 8 +- .../v1beta1/resolution_request_types.go | 11 +- .../v1beta1/zz_generated.deepcopy.go | 11 +- .../pipeline/pkg/apis/resource/resource.go | 43 - .../pkg/apis/resource/v1alpha1/doc.go | 4 - .../resource/v1alpha1/git/git_resource.go | 212 - .../resource/v1alpha1/image/image_resource.go | 95 - .../v1alpha1/pipeline_resource_types.go | 52 +- .../v1alpha1/pipelineresource_validation.go | 81 - .../pkg/apis/resource/v1alpha1/register.go | 3 - .../v1alpha1/storage/artifact_bucket.go | 103 - .../resource/v1alpha1/storage/artifact_pvc.go | 104 - .../pkg/apis/resource/v1alpha1/storage/gcs.go | 172 - .../apis/resource/v1alpha1/storage/secret.go | 60 - .../apis/resource/v1alpha1/storage/storage.go | 92 - .../pipeline/pkg/apis/version/conversion.go | 6 +- .../pkg/artifacts/artifacts_storage.go | 244 - .../clientset/versioned/fake/register.go | 14 +- .../clientset/versioned/scheme/register.go | 14 +- .../{v1beta1 => v1}/taskrun/fake/fake.go | 4 +- .../{v1beta1 => v1}/taskrun/taskrun.go | 34 +- .../{v1beta1 => v1}/taskrun/controller.go | 10 +- .../{v1beta1 => v1}/taskrun/reconciler.go | 65 +- .../pipeline/{v1beta1 => v1}/taskrun/state.go | 4 +- .../clientset/versioned/scheme/register.go | 14 +- .../resource/clientset/versioned/clientset.go | 121 - .../clientset/versioned/scheme/register.go | 56 - .../resource/v1alpha1/pipelineresource.go | 178 - .../informers/externalversions/factory.go | 180 - .../informers/externalversions/generic.go | 62 - .../internalinterfaces/factory_interfaces.go | 40 - .../resource/v1alpha1/pipelineresource.go | 90 - .../resource/injection/client/client.go | 243 - .../injection/informers/factory/factory.go | 56 - .../pipelineresource/pipelineresource.go | 116 - .../resource/v1alpha1/pipelineresource.go | 99 - .../pkg/container/container_replacements.go | 6 +- .../pkg/container/sidecar_replacements.go | 4 +- .../pkg/container/step_replacements.go | 8 +- .../pkg/credentials/dockercreds/creds.go | 7 +- .../pkg/credentials/gitcreds/basic.go | 3 +- .../pkg/credentials/gitcreds/creds.go | 6 +- .../pipeline/pkg/credentials/initialize.go | 2 +- .../affinityassistant_types.go | 56 + .../computeresources/tasklevel/tasklevel.go | 12 +- .../pkg/internal/resolution/resolved_meta.go | 9 +- .../tektoncd/pipeline/pkg/pod/entrypoint.go | 14 +- .../pkg/pod/entrypoint_lookup_impl.go | 7 +- .../tektoncd/pipeline/pkg/pod/pod.go | 148 +- .../tektoncd/pipeline/pkg/pod/script.go | 232 +- .../pipeline/pkg/pod/scripts_constants.go | 16 + .../tektoncd/pipeline/pkg/pod/status.go | 148 +- .../pipeline/pkg/pod/workingdir_init.go | 16 +- .../pipeline/pkg/reconciler/constant.go | 22 + .../pkg/reconciler/events/cache/cache.go | 21 +- .../cloudevent/cloud_event_controller.go | 39 +- .../events/cloudevent/cloudevent.go | 86 +- .../reconciler/events/cloudevent/interface.go | 2 +- .../pkg/reconciler/events/k8sevent/events.go | 9 +- .../pipelinerun/pipelinespec/pipelinespec.go | 30 +- 
.../reconciler/pipelinerun/resources/apply.go | 151 +- .../resources/input_output_steps.go | 115 - .../pipelinerun/resources/pipelineref.go | 179 +- .../resources/pipelinerunresolution.go | 708 +- .../pipelinerun/resources/pipelinerunstate.go | 199 +- .../resources/resultrefresolution.go | 136 +- .../resources/validate_dependencies.go | 24 +- .../pipelinerun/resources/validate_params.go | 131 +- .../pkg/reconciler/taskrun/controller.go | 18 +- .../pkg/reconciler/taskrun/resources/apply.go | 92 +- .../taskrun/resources/image_exporter.go | 100 - .../taskrun/resources/input_resources.go | 146 - .../taskrun/resources/output_resource.go | 136 - .../reconciler/taskrun/resources/taskref.go | 199 +- .../resources/taskresourceresolution.go | 95 - .../reconciler/taskrun/resources/taskspec.go | 50 +- .../taskrun/resources/validate_params.go | 47 + .../reconciler/taskrun/resources/volume.go | 70 - .../pkg/reconciler/taskrun/taskrun.go | 307 +- .../pkg/reconciler/taskrun/tracing.go | 4 +- .../reconciler/taskrun/validate_resources.go | 563 -- .../reconciler/taskrun/validate_taskrun.go | 311 + .../pkg/reconciler/volumeclaim/pvchandler.go | 75 +- .../tektoncd/pipeline/pkg/remote/errors.go | 17 +- .../pipeline/pkg/remote/oci/resolver.go | 219 - .../pipeline/pkg/remote/resolution/error.go | 60 +- .../pkg/remote/resolution/resolver.go | 23 +- .../tektoncd/pipeline/pkg/remote/resolver.go | 6 +- .../pipeline/pkg/resolution/common/errors.go | 70 +- .../pkg/resolution/common/interface.go | 63 + .../pkg/resolution/resource/crd_resource.go | 26 +- .../pipeline/pkg/resolution/resource/name.go | 13 +- .../pkg/resolution/resource/request.go | 8 +- .../pkg/resolution/resource/resource.go | 31 +- .../tektoncd/pipeline/pkg/result/result.go | 92 + .../tektoncd/pipeline/pkg/spire/controller.go | 338 + .../pipeline/pkg/spire/entrypointer.go | 99 + .../tektoncd/pipeline/pkg/spire/sign.go | 157 + .../tektoncd/pipeline/pkg/spire/spire.go | 77 + .../tektoncd/pipeline/pkg/spire/spire_mock.go | 306 + .../tektoncd/pipeline/pkg/spire/verify.go | 354 + .../pipeline/pkg/substitution/substitution.go | 141 +- .../pipeline/pkg/taskrunmetrics/fake/fake.go | 2 +- .../pipeline/pkg/taskrunmetrics/injection.go | 4 +- .../pipeline/pkg/taskrunmetrics/metrics.go | 189 +- .../pipeline/pkg/termination/parse.go | 14 +- .../pipeline/pkg/termination/write.go | 10 +- .../pipeline/pkg/trustedresources/errors.go | 18 +- .../pkg/trustedresources/verifier/errors.go | 40 +- .../pkg/trustedresources/verifier/verifier.go | 43 +- .../pipeline/pkg/trustedresources/verify.go | 252 +- .../tektoncd/pipeline/pkg/workspace/apply.go | 74 +- .../pipeline/pkg/workspace/validate.go | 8 +- vendor/github.com/vbatts/tar-split/LICENSE | 28 + .../vbatts/tar-split/archive/tar/common.go | 723 ++ .../vbatts/tar-split/archive/tar/format.go | 303 + .../vbatts/tar-split/archive/tar/reader.go | 925 ++ .../tar-split/archive/tar/stat_actime1.go | 20 + .../tar-split/archive/tar/stat_actime2.go | 20 + .../vbatts/tar-split/archive/tar/stat_unix.go | 96 + .../vbatts/tar-split/archive/tar/strconv.go | 326 + .../vbatts/tar-split/archive/tar/writer.go | 653 ++ vendor/github.com/zeebo/errs/.gitignore | 1 + vendor/github.com/zeebo/errs/AUTHORS | 4 + .../go-hclog => zeebo/errs}/LICENSE | 2 +- vendor/github.com/zeebo/errs/README.md | 235 + vendor/github.com/zeebo/errs/errs.go | 293 + vendor/github.com/zeebo/errs/group.go | 100 + .../go.opentelemetry.io/otel/.codespellignore | 5 + vendor/go.opentelemetry.io/otel/.codespellrc | 10 + vendor/go.opentelemetry.io/otel/.gitignore | 3 + 
vendor/go.opentelemetry.io/otel/.golangci.yml | 4 +- vendor/go.opentelemetry.io/otel/.lycheeignore | 2 + vendor/go.opentelemetry.io/otel/CHANGELOG.md | 278 +- vendor/go.opentelemetry.io/otel/CODEOWNERS | 2 +- .../go.opentelemetry.io/otel/CONTRIBUTING.md | 108 +- vendor/go.opentelemetry.io/otel/Makefile | 59 +- vendor/go.opentelemetry.io/otel/README.md | 12 +- vendor/go.opentelemetry.io/otel/RELEASING.md | 7 +- .../go.opentelemetry.io/otel/attribute/set.go | 20 +- .../otel/attribute/value.go | 16 +- .../otel/baggage/baggage.go | 56 +- vendor/go.opentelemetry.io/otel/codes/doc.go | 2 +- vendor/go.opentelemetry.io/otel/handler.go | 64 +- .../otel/internal/attribute/attribute.go | 82 +- .../otel/internal/global/handler.go | 103 + .../otel/internal/global/instruments.go | 359 + .../otel/internal/global/internal_logging.go | 19 +- .../otel/internal/global/meter.go | 354 + .../otel/internal/global/state.go | 45 +- vendor/go.opentelemetry.io/otel/metric.go | 53 + .../otel/metric}/LICENSE | 14 +- .../otel/metric/asyncfloat64.go | 271 + .../otel/metric/asyncint64.go | 269 + .../go.opentelemetry.io/otel/metric/config.go | 92 + vendor/go.opentelemetry.io/otel/metric/doc.go | 170 + .../otel/metric/embedded/embedded.go | 234 + .../otel/metric/instrument.go | 332 + .../go.opentelemetry.io/otel/metric/meter.go | 210 + .../otel/metric/syncfloat64.go | 179 + .../otel/metric/syncint64.go | 179 + .../go.opentelemetry.io/otel/requirements.txt | 1 + .../otel/sdk/internal/env/env.go | 10 +- .../otel/sdk/internal/internal.go | 11 +- .../otel/sdk/resource/auto.go | 58 +- .../otel/sdk/resource/builtin.go | 8 +- .../otel/sdk/resource/config.go | 7 + .../otel/sdk/resource/container.go | 2 +- .../otel/sdk/resource/doc.go | 3 + .../otel/sdk/resource/env.go | 16 +- .../otel/sdk/resource/host_id.go | 120 + .../otel/sdk/resource/host_id_bsd.go | 23 + .../otel/sdk/resource/host_id_darwin.go | 19 + .../otel/sdk/resource/host_id_exec.go | 29 + .../otel/sdk/resource/host_id_linux.go | 22 + .../otel/sdk/resource/host_id_readfile.go | 28 + .../otel/sdk/resource/host_id_unsupported.go | 36 + .../otel/sdk/resource/host_id_windows.go | 48 + .../otel/sdk/resource/os.go | 2 +- .../otel/sdk/resource/os_release_unix.go | 8 +- .../otel/sdk/resource/process.go | 16 +- .../otel/sdk/resource/resource.go | 18 +- .../otel/sdk/trace/batch_span_processor.go | 2 +- .../otel/sdk/trace/provider.go | 99 +- .../otel/sdk/trace/simple_span_processor.go | 9 +- .../otel/sdk/trace/span.go | 16 +- .../otel/sdk/trace/span_exporter.go | 2 +- .../otel/sdk/trace/span_processor.go | 4 +- .../otel/sdk/trace/tracer.go | 2 +- .../otel/sdk/trace/version.go | 20 + .../go.opentelemetry.io/otel/sdk/version.go | 20 + .../otel/semconv/internal/http.go | 8 +- .../otel/semconv/v1.17.0/event.go | 199 + .../go.opentelemetry.io/otel/trace/config.go | 17 + vendor/go.opentelemetry.io/otel/trace/noop.go | 4 +- vendor/go.opentelemetry.io/otel/version.go | 2 +- vendor/go.opentelemetry.io/otel/versions.yaml | 15 +- vendor/golang.org/x/crypto/blake2b/blake2b.go | 291 - .../x/crypto/blake2b/blake2bAVX2_amd64.go | 38 - .../x/crypto/blake2b/blake2bAVX2_amd64.s | 745 -- .../x/crypto/blake2b/blake2b_amd64.go | 25 - .../x/crypto/blake2b/blake2b_amd64.s | 279 - .../x/crypto/blake2b/blake2b_generic.go | 182 - .../x/crypto/blake2b/blake2b_ref.go | 12 - vendor/golang.org/x/crypto/blake2b/blake2x.go | 177 - .../golang.org/x/crypto/blake2b/register.go | 33 - .../chacha20poly1305/chacha20poly1305.go | 98 + .../chacha20poly1305_amd64.go | 87 + .../chacha20poly1305/chacha20poly1305_amd64.s | 
2696 +++++ .../chacha20poly1305_generic.go | 81 + .../chacha20poly1305_noasm.go | 16 + .../chacha20poly1305/xchacha20poly1305.go | 86 + vendor/golang.org/x/crypto/cryptobyte/asn1.go | 8 + .../x/crypto/curve25519/curve25519.go | 99 +- .../x/crypto/curve25519/curve25519_compat.go | 105 + .../x/crypto/curve25519/curve25519_go120.go | 46 + vendor/golang.org/x/crypto/hkdf/hkdf.go | 93 + vendor/golang.org/x/crypto/ssh/cipher.go | 3 +- vendor/golang.org/x/crypto/ssh/common.go | 11 +- vendor/golang.org/x/crypto/ssh/connection.go | 2 +- vendor/golang.org/x/crypto/ssh/keys.go | 6 +- vendor/golang.org/x/crypto/ssh/mac.go | 4 + vendor/golang.org/x/crypto/ssh/transport.go | 3 +- vendor/golang.org/x/exp/LICENSE | 27 + vendor/golang.org/x/exp/PATENTS | 22 + vendor/golang.org/x/exp/maps/maps.go | 94 + vendor/golang.org/x/net/http2/frame.go | 11 +- vendor/golang.org/x/net/http2/h2c/h2c.go | 2 +- vendor/golang.org/x/net/http2/hpack/hpack.go | 79 +- vendor/golang.org/x/net/http2/pipe.go | 6 +- vendor/golang.org/x/net/http2/server.go | 34 +- vendor/golang.org/x/net/http2/transport.go | 60 +- vendor/golang.org/x/net/http2/writesched.go | 3 +- .../x/net/http2/writesched_roundrobin.go | 119 + vendor/golang.org/x/oauth2/README.md | 12 +- vendor/golang.org/x/oauth2/google/default.go | 37 +- vendor/golang.org/x/oauth2/google/doc.go | 65 +- vendor/golang.org/x/oauth2/google/google.go | 15 +- .../externalaccount/basecredentials.go | 32 +- vendor/golang.org/x/oauth2/internal/oauth2.go | 2 +- vendor/golang.org/x/oauth2/internal/token.go | 64 +- vendor/golang.org/x/oauth2/oauth2.go | 33 +- vendor/golang.org/x/oauth2/token.go | 33 +- vendor/golang.org/x/sync/errgroup/errgroup.go | 10 +- vendor/golang.org/x/sync/errgroup/go120.go | 14 + .../golang.org/x/sync/errgroup/pre_go120.go | 15 + vendor/golang.org/x/sys/cpu/endian_little.go | 4 +- vendor/golang.org/x/sys/cpu/hwcap_linux.go | 15 + vendor/golang.org/x/sys/cpu/runtime_auxv.go | 16 + .../x/sys/cpu/runtime_auxv_go121.go | 19 + vendor/golang.org/x/sys/execabs/execabs.go | 2 +- .../golang.org/x/sys/execabs/execabs_go118.go | 6 + .../golang.org/x/sys/execabs/execabs_go119.go | 4 + vendor/golang.org/x/sys/unix/ioctl_signed.go | 70 + .../sys/unix/{ioctl.go => ioctl_unsigned.go} | 21 +- vendor/golang.org/x/sys/unix/ioctl_zos.go | 20 +- vendor/golang.org/x/sys/unix/mkall.sh | 2 +- vendor/golang.org/x/sys/unix/mkerrors.sh | 11 +- vendor/golang.org/x/sys/unix/ptrace_darwin.go | 6 + vendor/golang.org/x/sys/unix/ptrace_ios.go | 6 + vendor/golang.org/x/sys/unix/syscall_aix.go | 7 +- .../golang.org/x/sys/unix/syscall_aix_ppc.go | 1 - .../x/sys/unix/syscall_aix_ppc64.go | 1 - vendor/golang.org/x/sys/unix/syscall_bsd.go | 3 +- .../golang.org/x/sys/unix/syscall_darwin.go | 15 +- .../x/sys/unix/syscall_darwin_amd64.go | 1 + .../x/sys/unix/syscall_darwin_arm64.go | 1 + .../x/sys/unix/syscall_dragonfly.go | 2 +- .../golang.org/x/sys/unix/syscall_freebsd.go | 44 +- .../x/sys/unix/syscall_freebsd_386.go | 17 +- .../x/sys/unix/syscall_freebsd_amd64.go | 17 +- .../x/sys/unix/syscall_freebsd_arm.go | 15 +- .../x/sys/unix/syscall_freebsd_arm64.go | 15 +- .../x/sys/unix/syscall_freebsd_riscv64.go | 15 +- vendor/golang.org/x/sys/unix/syscall_hurd.go | 8 + vendor/golang.org/x/sys/unix/syscall_linux.go | 72 +- .../x/sys/unix/syscall_linux_386.go | 27 - .../x/sys/unix/syscall_linux_amd64.go | 1 - .../x/sys/unix/syscall_linux_arm.go | 27 - .../x/sys/unix/syscall_linux_arm64.go | 10 - .../x/sys/unix/syscall_linux_loong64.go | 5 - .../x/sys/unix/syscall_linux_mips64x.go | 1 - 
.../x/sys/unix/syscall_linux_mipsx.go | 27 - .../x/sys/unix/syscall_linux_ppc.go | 27 - .../x/sys/unix/syscall_linux_ppc64x.go | 1 - .../x/sys/unix/syscall_linux_riscv64.go | 1 - .../x/sys/unix/syscall_linux_s390x.go | 1 - .../x/sys/unix/syscall_linux_sparc64.go | 1 - .../golang.org/x/sys/unix/syscall_netbsd.go | 7 +- .../golang.org/x/sys/unix/syscall_openbsd.go | 19 +- .../golang.org/x/sys/unix/syscall_solaris.go | 36 +- vendor/golang.org/x/sys/unix/syscall_unix.go | 7 + .../x/sys/unix/syscall_zos_s390x.go | 6 +- .../x/sys/unix/zerrors_darwin_amd64.go | 19 + .../x/sys/unix/zerrors_darwin_arm64.go | 19 + vendor/golang.org/x/sys/unix/zerrors_linux.go | 24 +- .../x/sys/unix/zerrors_linux_sparc64.go | 48 + .../x/sys/unix/zptrace_armnn_linux.go | 8 +- .../x/sys/unix/zptrace_linux_arm64.go | 4 +- .../x/sys/unix/zptrace_mipsnn_linux.go | 8 +- .../x/sys/unix/zptrace_mipsnnle_linux.go | 8 +- .../x/sys/unix/zptrace_x86_linux.go | 8 +- .../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 23 +- .../x/sys/unix/zsyscall_aix_ppc64.go | 24 +- .../x/sys/unix/zsyscall_aix_ppc64_gc.go | 17 +- .../x/sys/unix/zsyscall_aix_ppc64_gccgo.go | 18 +- .../x/sys/unix/zsyscall_darwin_amd64.go | 55 +- .../x/sys/unix/zsyscall_darwin_amd64.s | 11 +- .../x/sys/unix/zsyscall_darwin_arm64.go | 55 +- .../x/sys/unix/zsyscall_darwin_arm64.s | 11 +- .../x/sys/unix/zsyscall_dragonfly_amd64.go | 20 +- .../x/sys/unix/zsyscall_freebsd_386.go | 30 +- .../x/sys/unix/zsyscall_freebsd_amd64.go | 30 +- .../x/sys/unix/zsyscall_freebsd_arm.go | 30 +- .../x/sys/unix/zsyscall_freebsd_arm64.go | 30 +- .../x/sys/unix/zsyscall_freebsd_riscv64.go | 30 +- .../golang.org/x/sys/unix/zsyscall_linux.go | 34 +- .../x/sys/unix/zsyscall_linux_386.go | 10 - .../x/sys/unix/zsyscall_linux_amd64.go | 10 - .../x/sys/unix/zsyscall_linux_arm.go | 10 - .../x/sys/unix/zsyscall_linux_arm64.go | 10 - .../x/sys/unix/zsyscall_linux_mips.go | 10 - .../x/sys/unix/zsyscall_linux_mips64.go | 10 - .../x/sys/unix/zsyscall_linux_mips64le.go | 10 - .../x/sys/unix/zsyscall_linux_mipsle.go | 10 - .../x/sys/unix/zsyscall_linux_ppc.go | 10 - .../x/sys/unix/zsyscall_linux_ppc64.go | 10 - .../x/sys/unix/zsyscall_linux_ppc64le.go | 10 - .../x/sys/unix/zsyscall_linux_riscv64.go | 10 - .../x/sys/unix/zsyscall_linux_s390x.go | 10 - .../x/sys/unix/zsyscall_linux_sparc64.go | 10 - .../x/sys/unix/zsyscall_netbsd_386.go | 20 +- .../x/sys/unix/zsyscall_netbsd_amd64.go | 20 +- .../x/sys/unix/zsyscall_netbsd_arm.go | 20 +- .../x/sys/unix/zsyscall_netbsd_arm64.go | 20 +- .../x/sys/unix/zsyscall_openbsd_386.go | 44 +- .../x/sys/unix/zsyscall_openbsd_386.s | 15 +- .../x/sys/unix/zsyscall_openbsd_amd64.go | 46 +- .../x/sys/unix/zsyscall_openbsd_amd64.s | 15 +- .../x/sys/unix/zsyscall_openbsd_arm.go | 44 +- .../x/sys/unix/zsyscall_openbsd_arm.s | 15 +- .../x/sys/unix/zsyscall_openbsd_arm64.go | 44 +- .../x/sys/unix/zsyscall_openbsd_arm64.s | 15 +- .../x/sys/unix/zsyscall_openbsd_mips64.go | 44 +- .../x/sys/unix/zsyscall_openbsd_mips64.s | 15 +- .../x/sys/unix/zsyscall_openbsd_ppc64.go | 44 +- .../x/sys/unix/zsyscall_openbsd_ppc64.s | 18 +- .../x/sys/unix/zsyscall_openbsd_riscv64.go | 44 +- .../x/sys/unix/zsyscall_openbsd_riscv64.s | 15 +- .../x/sys/unix/zsyscall_solaris_amd64.go | 26 +- .../x/sys/unix/zsyscall_zos_s390x.go | 12 +- .../x/sys/unix/ztypes_darwin_amd64.go | 11 + .../x/sys/unix/ztypes_darwin_arm64.go | 11 + .../x/sys/unix/ztypes_freebsd_386.go | 2 +- .../x/sys/unix/ztypes_freebsd_amd64.go | 2 +- .../x/sys/unix/ztypes_freebsd_arm.go | 2 +- .../x/sys/unix/ztypes_freebsd_arm64.go | 2 +- 
.../x/sys/unix/ztypes_freebsd_riscv64.go | 2 +- vendor/golang.org/x/sys/unix/ztypes_linux.go | 186 +- .../golang.org/x/sys/unix/ztypes_linux_386.go | 2 +- .../x/sys/unix/ztypes_linux_amd64.go | 2 +- .../golang.org/x/sys/unix/ztypes_linux_arm.go | 2 +- .../x/sys/unix/ztypes_linux_arm64.go | 2 +- .../x/sys/unix/ztypes_linux_loong64.go | 2 +- .../x/sys/unix/ztypes_linux_mips.go | 2 +- .../x/sys/unix/ztypes_linux_mips64.go | 2 +- .../x/sys/unix/ztypes_linux_mips64le.go | 2 +- .../x/sys/unix/ztypes_linux_mipsle.go | 2 +- .../golang.org/x/sys/unix/ztypes_linux_ppc.go | 2 +- .../x/sys/unix/ztypes_linux_ppc64.go | 2 +- .../x/sys/unix/ztypes_linux_ppc64le.go | 2 +- .../x/sys/unix/ztypes_linux_riscv64.go | 2 +- .../x/sys/unix/ztypes_linux_s390x.go | 2 +- .../x/sys/unix/ztypes_linux_sparc64.go | 2 +- .../golang.org/x/sys/windows/env_windows.go | 6 +- .../golang.org/x/sys/windows/exec_windows.go | 7 +- vendor/golang.org/x/sys/windows/service.go | 7 + .../x/sys/windows/syscall_windows.go | 19 +- .../golang.org/x/sys/windows/types_windows.go | 95 +- .../x/sys/windows/zsyscall_windows.go | 44 +- .../x/text/unicode/norm/forminfo.go | 2 +- vendor/golang.org/x/time/rate/rate.go | 20 +- .../x/tools/go/packages/packages.go | 34 +- .../x/tools/go/types/objectpath/objectpath.go | 762 ++ .../x/tools/internal/gcimporter/gcimporter.go | 12 + .../x/tools/internal/gcimporter/iexport.go | 136 +- .../x/tools/internal/gcimporter/iimport.go | 116 +- .../tools/internal/gcimporter/ureader_yes.go | 41 +- .../x/tools/internal/pkgbits/decoder.go | 2 +- .../x/tools/internal/pkgbits/encoder.go | 2 +- .../internal/tokeninternal/tokeninternal.go | 151 + .../x/tools/internal/typeparams/common.go | 1 - .../x/tools/internal/typesinternal/types.go | 9 + .../api/googleapi/googleapi.go | 5 +- vendor/google.golang.org/api/internal/cba.go | 282 + .../cert/default_cert.go | 0 .../cert/enterprise_cert.go | 4 +- .../cert/secureconnect_cert.go | 3 +- .../google.golang.org/api/internal/creds.go | 79 +- .../api/internal/impersonate/impersonate.go | 3 +- vendor/google.golang.org/api/internal/s2a.go | 136 + .../api/internal/settings.go | 1 + .../google.golang.org/api/internal/version.go | 2 +- .../option/internaloption/internaloption.go | 15 + .../api/transport/grpc/dial.go | 31 +- .../transport/http/configure_http2_go116.go | 26 - .../http/configure_http2_not_go116.go | 17 - .../api/transport/http/dial.go | 35 +- .../api/transport/internal/dca/dca.go | 143 - .../genproto/googleapis/api/LICENSE | 202 + .../googleapis/api/annotations/client.pb.go | 407 +- .../api/annotations/field_behavior.pb.go | 20 +- .../googleapis/api/annotations/http.pb.go | 17 +- .../googleapis/api/annotations/resource.pb.go | 66 +- .../googleapis/api/annotations/routing.pb.go | 62 +- .../googleapis/api/httpbody/httpbody.pb.go | 4 +- .../googleapis/api/launch_stage.pb.go | 4 +- .../genproto/googleapis/api/tidyfix.go | 23 + .../genproto/googleapis/cloud/kms/v1/alias.go | 652 -- .../genproto/googleapis/iam/v1/alias.go | 208 - .../genproto/googleapis/rpc/LICENSE | 202 + .../genproto/internal/doc.go | 17 + vendor/google.golang.org/grpc/CONTRIBUTING.md | 25 +- .../grpc/attributes/attributes.go | 29 + .../grpc/balancer/balancer.go | 8 + .../grpclb/grpc_lb_v1/load_balancer.pb.go | 9 +- .../grpc_lb_v1/load_balancer_grpc.pb.go | 10 +- .../grpc/balancer/grpclb/grpclb.go | 4 +- .../balancer/grpclb/grpclb_remote_balancer.go | 2 +- .../grpc/balancer_conn_wrappers.go | 486 +- .../grpc_binarylog_v1/binarylog.pb.go | 9 +- vendor/google.golang.org/grpc/call.go | 5 + 
vendor/google.golang.org/grpc/clientconn.go | 687 +- .../grpc/codes/code_string.go | 51 +- .../alts/internal/handshaker/handshaker.go | 54 +- .../internal/handshaker/service/service.go | 18 + .../internal/proto/grpc_gcp/altscontext.pb.go | 9 +- .../internal/proto/grpc_gcp/handshaker.pb.go | 9 +- .../proto/grpc_gcp/handshaker_grpc.pb.go | 10 +- .../grpc_gcp/transport_security_common.pb.go | 9 +- .../grpc/credentials/oauth/oauth.go | 8 +- .../google.golang.org/grpc/credentials/tls.go | 4 +- vendor/google.golang.org/grpc/dialoptions.go | 72 +- .../grpc/encoding/encoding.go | 4 +- .../grpc/grpclog/loggerv2.go | 7 +- .../grpc/health/grpc_health_v1/health.pb.go | 9 +- .../health/grpc_health_v1/health_grpc.pb.go | 15 +- vendor/google.golang.org/grpc/idle.go | 287 + .../grpc/internal/binarylog/binarylog.go | 11 +- .../grpc/internal/binarylog/method_logger.go | 142 +- .../grpc/internal/binarylog/sink.go | 12 +- .../grpc/internal/buffer/unbounded.go | 26 +- .../grpc/internal/envconfig/envconfig.go | 43 +- .../grpc/internal/envconfig/observability.go | 6 + .../grpc/internal/envconfig/xds.go | 48 +- .../googlecloud/manufacturer_linux.go | 4 +- .../grpc/internal/grpclog/prefixLogger.go | 12 + .../grpc/internal/grpcrand/grpcrand.go | 14 + .../internal/grpcsync/callback_serializer.go | 119 + .../grpc/internal/internal.go | 37 + .../grpc/internal/metadata/metadata.go | 62 +- .../internal/resolver/dns/dns_resolver.go | 6 +- .../resolver/passthrough/passthrough.go | 11 +- .../grpc/internal/resolver/unix/unix.go | 4 +- .../grpc/internal/serviceconfig/duration.go | 130 + .../grpc/internal/transport/controlbuf.go | 127 +- .../grpc/internal/transport/defaults.go | 6 + .../grpc/internal/transport/handler_server.go | 52 +- .../grpc/internal/transport/http2_client.go | 124 +- .../grpc/internal/transport/http2_server.go | 218 +- .../grpc/internal/transport/http_util.go | 26 +- .../grpc/internal/transport/logging.go | 40 + .../grpc/internal/transport/transport.go | 31 +- .../grpc/metadata/metadata.go | 13 +- .../google.golang.org/grpc/picker_wrapper.go | 62 +- vendor/google.golang.org/grpc/pickfirst.go | 58 +- .../grpc/reflection/README.md | 18 - .../grpc_reflection_v1alpha/reflection.pb.go | 955 -- .../grpc_reflection_v1alpha/reflection.proto | 138 - .../reflection_grpc.pb.go | 155 - .../grpc/reflection/serverreflection.go | 324 - vendor/google.golang.org/grpc/regenerate.sh | 7 +- .../grpc/resolver/resolver.go | 58 +- .../grpc/resolver_conn_wrapper.go | 229 +- vendor/google.golang.org/grpc/rpc_util.go | 70 +- vendor/google.golang.org/grpc/server.go | 288 +- .../google.golang.org/grpc/service_config.go | 83 +- vendor/google.golang.org/grpc/stats/stats.go | 22 +- .../google.golang.org/grpc/status/status.go | 53 +- vendor/google.golang.org/grpc/stream.go | 128 +- vendor/google.golang.org/grpc/version.go | 2 +- vendor/google.golang.org/grpc/vet.sh | 34 +- .../protobuf/encoding/protojson/doc.go | 2 +- .../encoding/protojson/well_known_types.go | 12 +- .../protobuf/encoding/protowire/wire.go | 8 +- .../protobuf/internal/encoding/json/decode.go | 2 +- .../protobuf/internal/encoding/text/decode.go | 5 +- .../internal/encoding/text/decode_number.go | 43 +- .../protobuf/internal/genid/descriptor_gen.go | 90 +- .../protobuf/internal/impl/convert.go | 1 - .../protobuf/internal/strs/strings_unsafe.go | 2 +- .../protobuf/internal/version/version.go | 4 +- .../google.golang.org/protobuf/proto/doc.go | 9 +- .../google.golang.org/protobuf/proto/equal.go | 172 +- .../reflect/protoreflect/source_gen.go | 14 + 
.../protobuf/reflect/protoreflect/value.go | 2 +- .../reflect/protoreflect/value_equal.go | 168 + .../reflect/protoreflect/value_union.go | 4 +- .../reflect/protoregistry/registry.go | 2 +- .../types/descriptorpb/descriptor.pb.go | 1547 +-- .../protobuf/types/known/anypb/any.pb.go | 135 +- .../types/known/durationpb/duration.pb.go | 63 +- .../protobuf/types/known/emptypb/empty.pb.go | 8 +- .../types/known/fieldmaskpb/field_mask.pb.go | 137 +- .../types/known/structpb/struct.pb.go | 810 ++ .../types/known/timestamppb/timestamp.pb.go | 61 +- .../types/known/wrapperspb/wrappers.pb.go | 2 +- .../admissionregistration/v1/generated.pb.go | 484 +- .../admissionregistration/v1/generated.proto | 77 + .../api/admissionregistration/v1/types.go | 77 + .../v1/types_swagger_doc_generated.go | 14 +- .../v1/zz_generated.deepcopy.go | 26 + .../api/admissionregistration/v1alpha1/doc.go | 23 + .../v1alpha1/generated.pb.go | 4269 ++++++++ .../v1alpha1/generated.proto | 527 + .../v1alpha1/register.go | 56 + .../admissionregistration/v1alpha1/types.go | 570 ++ .../v1alpha1/types_swagger_doc_generated.go | 191 + .../v1alpha1/zz_generated.deepcopy.go | 444 + .../v1beta1/generated.pb.go | 1014 +- .../v1beta1/generated.proto | 135 +- .../admissionregistration/v1beta1/types.go | 147 +- .../v1beta1/types_swagger_doc_generated.go | 35 +- .../v1beta1/zz_generated.deepcopy.go | 99 +- vendor/k8s.io/api/apidiscovery/v2beta1/doc.go | 24 + .../api/apidiscovery/v2beta1/generated.pb.go | 1744 ++++ .../api/apidiscovery/v2beta1/generated.proto | 156 + .../api/apidiscovery/v2beta1/register.go | 56 + .../k8s.io/api/apidiscovery/v2beta1/types.go | 163 + .../v2beta1/zz_generated.deepcopy.go | 190 + .../zz_generated.prerelease-lifecycle.go | 58 + .../v1alpha1/types_swagger_doc_generated.go | 2 +- vendor/k8s.io/api/apps/v1/generated.pb.go | 481 +- vendor/k8s.io/api/apps/v1/generated.proto | 32 +- vendor/k8s.io/api/apps/v1/types.go | 32 +- .../apps/v1/types_swagger_doc_generated.go | 20 +- .../api/apps/v1/zz_generated.deepcopy.go | 21 + .../k8s.io/api/apps/v1beta1/generated.pb.go | 459 +- .../k8s.io/api/apps/v1beta1/generated.proto | 82 +- vendor/k8s.io/api/apps/v1beta1/types.go | 82 +- .../v1beta1/types_swagger_doc_generated.go | 70 +- .../api/apps/v1beta1/zz_generated.deepcopy.go | 21 + .../k8s.io/api/apps/v1beta2/generated.pb.go | 498 +- .../k8s.io/api/apps/v1beta2/generated.proto | 34 +- vendor/k8s.io/api/apps/v1beta2/types.go | 34 +- .../v1beta2/types_swagger_doc_generated.go | 22 +- .../api/apps/v1beta2/zz_generated.deepcopy.go | 21 + .../v1/types_swagger_doc_generated.go | 2 +- .../k8s.io/api/authentication/v1alpha1/doc.go | 23 + .../authentication/v1alpha1/generated.pb.go | 567 ++ .../authentication/v1alpha1/generated.proto | 51 + .../api/authentication/v1alpha1/register.go | 51 + .../api/authentication/v1alpha1/types.go | 48 + .../v1alpha1/types_swagger_doc_generated.go | 49 + .../v1alpha1}/zz_generated.deepcopy.go | 45 +- .../zz_generated.prerelease-lifecycle.go | 40 + .../authentication/v1beta1/generated.pb.go | 476 +- .../authentication/v1beta1/generated.proto | 21 + .../api/authentication/v1beta1/register.go | 1 + .../api/authentication/v1beta1/types.go | 27 + .../v1beta1/types_swagger_doc_generated.go | 21 +- .../v1beta1/zz_generated.deepcopy.go | 44 + .../zz_generated.prerelease-lifecycle.go | 18 + .../v1/types_swagger_doc_generated.go | 2 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- .../k8s.io/api/autoscaling/v1/generated.proto | 42 +- vendor/k8s.io/api/autoscaling/v1/types.go | 79 +- 
.../v1/types_swagger_doc_generated.go | 42 +- .../k8s.io/api/autoscaling/v2/generated.proto | 20 +- vendor/k8s.io/api/autoscaling/v2/types.go | 61 +- .../v2/types_swagger_doc_generated.go | 22 +- .../api/autoscaling/v2beta1/generated.proto | 4 +- .../k8s.io/api/autoscaling/v2beta1/types.go | 4 +- .../v2beta1/types_swagger_doc_generated.go | 6 +- .../api/autoscaling/v2beta2/generated.proto | 24 +- .../k8s.io/api/autoscaling/v2beta2/types.go | 62 +- .../v2beta2/types_swagger_doc_generated.go | 26 +- vendor/k8s.io/api/batch/v1/generated.proto | 31 +- vendor/k8s.io/api/batch/v1/types.go | 59 +- .../batch/v1/types_swagger_doc_generated.go | 28 +- .../k8s.io/api/batch/v1beta1/generated.pb.go | 317 +- .../k8s.io/api/batch/v1beta1/generated.proto | 15 +- vendor/k8s.io/api/batch/v1beta1/register.go | 1 - vendor/k8s.io/api/batch/v1beta1/types.go | 20 +- .../v1beta1/types_swagger_doc_generated.go | 16 +- .../batch/v1beta1/zz_generated.deepcopy.go | 27 - .../zz_generated.prerelease-lifecycle.go | 18 - vendor/k8s.io/api/certificates/v1/types.go | 3 +- .../v1/types_swagger_doc_generated.go | 2 +- .../k8s.io/api/certificates/v1alpha1/doc.go | 24 + .../api/certificates/v1alpha1/generated.pb.go | 831 ++ .../api/certificates/v1alpha1/generated.proto | 103 + .../api/certificates/v1alpha1/register.go | 61 + .../k8s.io/api/certificates/v1alpha1/types.go | 106 + .../v1alpha1/types_swagger_doc_generated.go | 60 + .../v1alpha1/zz_generated.deepcopy.go | 102 + .../zz_generated.prerelease-lifecycle.go | 58 + .../api/certificates/v1beta1/generated.proto | 6 +- .../k8s.io/api/certificates/v1beta1/types.go | 9 +- .../v1beta1/types_swagger_doc_generated.go | 4 +- .../api/coordination/v1/generated.proto | 6 +- vendor/k8s.io/api/coordination/v1/types.go | 6 +- .../v1/types_swagger_doc_generated.go | 8 +- .../api/coordination/v1beta1/generated.proto | 6 +- .../k8s.io/api/coordination/v1beta1/types.go | 6 +- .../v1beta1/types_swagger_doc_generated.go | 8 +- .../api/core/v1/annotation_key_constants.go | 21 +- vendor/k8s.io/api/core/v1/generated.pb.go | 4839 ++++++--- vendor/k8s.io/api/core/v1/generated.proto | 304 +- vendor/k8s.io/api/core/v1/toleration.go | 14 +- vendor/k8s.io/api/core/v1/types.go | 404 +- .../core/v1/types_swagger_doc_generated.go | 129 +- .../api/core/v1/zz_generated.deepcopy.go | 160 +- .../k8s.io/api/discovery/v1/generated.proto | 32 +- vendor/k8s.io/api/discovery/v1/types.go | 48 +- .../v1/types_swagger_doc_generated.go | 18 +- .../api/discovery/v1beta1/generated.proto | 19 +- vendor/k8s.io/api/discovery/v1beta1/types.go | 36 +- .../v1beta1/types_swagger_doc_generated.go | 16 +- .../events/v1/types_swagger_doc_generated.go | 2 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- .../api/extensions/v1beta1/generated.pb.go | 8862 ++++++----------- .../api/extensions/v1beta1/generated.proto | 346 +- .../k8s.io/api/extensions/v1beta1/register.go | 2 - vendor/k8s.io/api/extensions/v1beta1/types.go | 442 +- .../v1beta1/types_swagger_doc_generated.go | 203 +- .../v1beta1/zz_generated.deepcopy.go | 433 +- .../zz_generated.prerelease-lifecycle.go | 48 - .../api/flowcontrol/v1alpha1/generated.pb.go | 253 +- .../api/flowcontrol/v1alpha1/generated.proto | 29 + .../k8s.io/api/flowcontrol/v1alpha1/types.go | 37 +- .../v1alpha1/types_swagger_doc_generated.go | 4 +- .../v1alpha1/zz_generated.deepcopy.go | 10 + .../zz_generated.prerelease-lifecycle.go | 8 +- .../api/flowcontrol/v1beta1/generated.pb.go | 252 +- .../api/flowcontrol/v1beta1/generated.proto | 29 + .../k8s.io/api/flowcontrol/v1beta1/types.go | 37 +- 
.../v1beta1/types_swagger_doc_generated.go | 4 +- .../v1beta1/zz_generated.deepcopy.go | 10 + .../zz_generated.prerelease-lifecycle.go | 8 +- .../api/flowcontrol/v1beta2/generated.pb.go | 252 +- .../api/flowcontrol/v1beta2/generated.proto | 29 + .../k8s.io/api/flowcontrol/v1beta2/types.go | 33 + .../v1beta2/types_swagger_doc_generated.go | 4 +- .../v1beta2/zz_generated.deepcopy.go | 10 + .../zz_generated.prerelease-lifecycle.go | 28 + .../api/flowcontrol/v1beta3/doc.go} | 16 +- .../api/flowcontrol/v1beta3/generated.pb.go | 5428 ++++++++++ .../api/flowcontrol/v1beta3/generated.proto | 473 + .../api/flowcontrol/v1beta3/register.go | 58 + .../k8s.io/api/flowcontrol/v1beta3/types.go | 612 ++ .../v1beta3/types_swagger_doc_generated.go | 263 + .../v1beta3/zz_generated.deepcopy.go | 552 + .../zz_generated.prerelease-lifecycle.go | 94 + .../k8s.io/api/networking/v1/generated.pb.go | 911 +- .../k8s.io/api/networking/v1/generated.proto | 217 +- vendor/k8s.io/api/networking/v1/types.go | 229 +- .../v1/types_swagger_doc_generated.go | 139 +- .../networking/v1/zz_generated.deepcopy.go | 67 + .../api/networking/v1alpha1/generated.pb.go | 1199 ++- .../api/networking/v1alpha1/generated.proto | 79 +- .../api/networking/v1alpha1/register.go | 12 +- .../k8s.io/api/networking/v1alpha1/types.go | 86 +- .../v1alpha1/types_swagger_doc_generated.go | 56 +- .../networking/v1alpha1/well_known_labels.go | 33 + .../v1alpha1/zz_generated.deepcopy.go | 97 + .../zz_generated.prerelease-lifecycle.go | 36 + .../api/networking/v1beta1/generated.pb.go | 830 +- .../api/networking/v1beta1/generated.proto | 113 +- vendor/k8s.io/api/networking/v1beta1/types.go | 122 +- .../v1beta1/types_swagger_doc_generated.go | 89 +- .../v1beta1/zz_generated.deepcopy.go | 67 + vendor/k8s.io/api/node/v1/generated.proto | 10 +- vendor/k8s.io/api/node/v1/types.go | 12 +- .../node/v1/types_swagger_doc_generated.go | 12 +- .../k8s.io/api/node/v1alpha1/generated.proto | 14 +- vendor/k8s.io/api/node/v1alpha1/types.go | 16 +- .../v1alpha1/types_swagger_doc_generated.go | 14 +- .../k8s.io/api/node/v1beta1/generated.proto | 12 +- vendor/k8s.io/api/node/v1beta1/types.go | 14 +- .../v1beta1/types_swagger_doc_generated.go | 12 +- vendor/k8s.io/api/policy/v1/generated.pb.go | 150 +- vendor/k8s.io/api/policy/v1/generated.proto | 28 + vendor/k8s.io/api/policy/v1/types.go | 48 + .../policy/v1/types_swagger_doc_generated.go | 11 +- .../api/policy/v1/zz_generated.deepcopy.go | 5 + .../k8s.io/api/policy/v1beta1/generated.pb.go | 287 +- .../k8s.io/api/policy/v1beta1/generated.proto | 28 + vendor/k8s.io/api/policy/v1beta1/types.go | 48 + .../v1beta1/types_swagger_doc_generated.go | 11 +- .../policy/v1beta1/zz_generated.deepcopy.go | 5 + .../rbac/v1/types_swagger_doc_generated.go | 2 +- .../v1alpha1/types_swagger_doc_generated.go | 2 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- vendor/k8s.io/api/resource/v1alpha2/doc.go | 24 + .../api/resource/v1alpha2/generated.pb.go | 4817 +++++++++ .../api/resource/v1alpha2/generated.proto | 400 + .../k8s.io/api/resource/v1alpha2/register.go | 63 + vendor/k8s.io/api/resource/v1alpha2/types.go | 462 + .../v1alpha2/types_swagger_doc_generated.go | 232 + .../v1alpha2/zz_generated.deepcopy.go | 498 + .../k8s.io/api/scheduling/v1/generated.proto | 4 +- vendor/k8s.io/api/scheduling/v1/types.go | 4 +- .../v1/types_swagger_doc_generated.go | 6 +- .../api/scheduling/v1alpha1/generated.proto | 4 +- .../k8s.io/api/scheduling/v1alpha1/types.go | 4 +- .../v1alpha1/types_swagger_doc_generated.go | 6 +- 
.../api/scheduling/v1beta1/generated.proto | 4 +- vendor/k8s.io/api/scheduling/v1beta1/types.go | 4 +- .../v1beta1/types_swagger_doc_generated.go | 6 +- vendor/k8s.io/api/storage/v1/generated.proto | 126 +- vendor/k8s.io/api/storage/v1/types.go | 132 +- .../storage/v1/types_swagger_doc_generated.go | 82 +- .../api/storage/v1alpha1/generated.proto | 38 +- vendor/k8s.io/api/storage/v1alpha1/types.go | 41 +- .../v1alpha1/types_swagger_doc_generated.go | 38 +- .../api/storage/v1beta1/generated.proto | 112 +- vendor/k8s.io/api/storage/v1beta1/types.go | 119 +- .../v1beta1/types_swagger_doc_generated.go | 78 +- .../apimachinery/pkg/api/meta/errors.go | 35 +- .../k8s.io/apimachinery/pkg/api/meta/help.go | 3 +- .../pkg/api/validation/objectmeta.go | 9 +- .../pkg/apis/meta/internalversion/defaults.go | 38 + .../pkg/apis/meta/internalversion/types.go | 25 + .../zz_generated.conversion.go | 2 + .../internalversion/zz_generated.deepcopy.go | 5 + .../pkg/apis/meta/v1/generated.pb.go | 385 +- .../pkg/apis/meta/v1/generated.proto | 69 +- .../apimachinery/pkg/apis/meta/v1/helpers.go | 2 +- .../apimachinery/pkg/apis/meta/v1/types.go | 69 +- .../meta/v1/types_swagger_doc_generated.go | 25 +- .../pkg/apis/meta/v1/validation/validation.go | 40 +- .../apis/meta/v1/zz_generated.conversion.go | 7 + .../pkg/apis/meta/v1/zz_generated.deepcopy.go | 5 + .../v1beta1/types_swagger_doc_generated.go | 2 +- .../k8s.io/apimachinery/pkg/labels/labels.go | 2 + .../apimachinery/pkg/labels/selector.go | 141 +- .../pkg/runtime/schema/group_version.go | 6 +- .../k8s.io/apimachinery/pkg/runtime/scheme.go | 3 +- .../pkg/runtime/serializer/codec_factory.go | 3 +- .../serializer/versioning/versioning.go | 2 +- .../k8s.io/apimachinery/pkg/runtime/types.go | 2 +- .../apimachinery/pkg/types/namespacedname.go | 10 + .../apimachinery/pkg/util/errors/errors.go | 2 +- .../apimachinery/pkg/util/framer/framer.go | 2 +- .../pkg/util/managedfields/endpoints.yaml | 7018 +++++++++++++ .../pkg/util/managedfields/fieldmanager.go | 57 + .../managedfields/internal/atmostevery.go | 60 + .../internal/buildmanagerinfo.go | 74 + .../managedfields/internal/capmanagers.go | 133 + .../util/managedfields/internal/conflict.go | 89 + .../managedfields/internal/fieldmanager.go | 206 + .../pkg/util/managedfields/internal/fields.go | 47 + .../managedfields/internal/lastapplied.go | 50 + .../internal/lastappliedmanager.go | 171 + .../internal/lastappliedupdater.go | 102 + .../managedfields/internal/managedfields.go | 248 + .../internal/managedfieldsupdater.go | 82 + .../util/managedfields/internal/manager.go | 52 + .../managedfields/internal/pathelement.go | 140 + .../managedfields/internal/skipnonapplied.go | 91 + .../util/managedfields/internal/stripmeta.go | 90 + .../managedfields/internal/structuredmerge.go | 183 + .../managedfields/internal/typeconverter.go | 193 + .../internal/versionconverter.go | 123 + .../pkg/util/managedfields/node.yaml | 261 + .../pkg/util/managedfields/pod.yaml | 121 + .../pkg/util/managedfields/scalehandler.go | 174 + .../pkg/util/managedfields/typeconverter.go | 47 + .../apimachinery/pkg/util/mergepatch/util.go | 3 +- .../k8s.io/apimachinery/pkg/util/sets/byte.go | 148 +- .../k8s.io/apimachinery/pkg/util/sets/doc.go | 7 +- .../apimachinery/pkg/util/sets/empty.go | 4 +- .../k8s.io/apimachinery/pkg/util/sets/int.go | 148 +- .../apimachinery/pkg/util/sets/int32.go | 148 +- .../apimachinery/pkg/util/sets/int64.go | 148 +- .../apimachinery/pkg/util/sets/ordered.go | 53 + .../k8s.io/apimachinery/pkg/util/sets/set.go | 241 + 
.../apimachinery/pkg/util/sets/string.go | 148 +- .../pkg/util/strategicpatch/OWNERS | 1 + .../pkg/util/strategicpatch/patch.go | 2 +- .../pkg/util/validation/validation.go | 8 +- .../apimachinery/pkg/util/wait/backoff.go | 502 + .../apimachinery/pkg/util/wait/delay.go | 51 + .../apimachinery/pkg/util/wait/error.go | 96 + .../k8s.io/apimachinery/pkg/util/wait/loop.go | 86 + .../k8s.io/apimachinery/pkg/util/wait/poll.go | 315 + .../apimachinery/pkg/util/wait/timer.go | 121 + .../k8s.io/apimachinery/pkg/util/wait/wait.go | 634 +- .../v1/matchcondition.go | 48 + .../v1/mutatingwebhook.go | 14 + .../v1/validatingwebhook.go | 14 + .../v1alpha1/auditannotation.go | 48 + .../v1alpha1/expressionwarning.go | 48 + .../v1alpha1/matchcondition.go | 48 + .../v1alpha1/matchresources.go | 90 + .../namedrulewithoperations.go} | 39 +- .../v1alpha1/paramkind.go | 48 + .../v1alpha1/paramref.go | 48 + .../v1alpha1/typechecking.go | 44 + .../v1alpha1/validatingadmissionpolicy.go | 256 + .../validatingadmissionpolicybinding.go | 247 + .../validatingadmissionpolicybindingspec.go | 72 + .../v1alpha1/validatingadmissionpolicyspec.go | 103 + .../validatingadmissionpolicystatus.go | 66 + .../v1alpha1/validation.go | 70 + .../v1beta1/matchcondition.go | 48 + .../v1beta1/mutatingwebhook.go | 29 +- .../admissionregistration/v1beta1/rule.go | 76 - .../v1beta1/validatingwebhook.go | 29 +- .../apps/v1/statefulsetordinals.go | 39 + .../apps/v1/statefulsetspec.go | 9 + .../v1beta1/statefulsetordinals.go} | 20 +- .../apps/v1beta1/statefulsetspec.go | 9 + .../apps/v1beta2/statefulsetordinals.go | 39 + .../apps/v1beta2/statefulsetspec.go | 9 + .../v1alpha1/clustertrustbundle.go | 247 + .../v1alpha1/clustertrustbundlespec.go | 48 + .../core/v1/claimsource.go | 48 + .../applyconfigurations/core/v1/container.go | 58 +- .../core/v1/containerresizepolicy.go | 52 + .../core/v1/containerstatus.go | 40 +- .../core/v1/ephemeralcontainer.go | 13 + .../core/v1/ephemeralcontainercommon.go | 58 +- .../core/v1/persistentvolumeclaimspec.go | 4 +- .../v1/podresourceclaim.go} | 30 +- .../core/v1/podschedulinggate.go | 39 + .../applyconfigurations/core/v1/podspec.go | 28 + .../applyconfigurations/core/v1/podstatus.go | 9 + .../v1/resourceclaim.go} | 14 +- .../core/v1/resourcerequirements.go | 18 +- .../core/v1/servicespec.go | 8 +- .../core/v1/typedobjectreference.go | 66 + .../extensions/v1beta1/allowedhostpath.go | 48 - .../v1beta1/fsgroupstrategyoptions.go | 57 - .../extensions/v1beta1/hostportrange.go | 48 - .../extensions/v1beta1/idrange.go | 48 - .../v1beta1/ingressloadbalanceringress.go | 62 + .../v1beta1/ingressloadbalancerstatus.go | 44 + .../extensions/v1beta1/ingressportstatus.go | 61 + .../extensions/v1beta1/ingressstatus.go | 8 +- .../v1beta1/podsecuritypolicyspec.go | 285 - .../v1beta1/runasgroupstrategyoptions.go | 57 - .../v1beta1/runasuserstrategyoptions.go | 57 - .../v1beta1/runtimeclassstrategyoptions.go | 50 - .../v1beta1/selinuxstrategyoptions.go | 53 - .../supplementalgroupsstrategyoptions.go | 57 - .../limitedprioritylevelconfiguration.go | 18 + .../limitedprioritylevelconfiguration.go | 18 + .../limitedprioritylevelconfiguration.go | 18 + .../v1beta3/flowdistinguishermethod.go | 43 + .../flowcontrol/v1beta3/flowschema.go | 256 + .../v1beta3/flowschemacondition.go | 80 + .../flowcontrol/v1beta3/flowschemaspec.go | 71 + .../flowcontrol/v1beta3/flowschemastatus.go | 44 + .../flowcontrol/v1beta3/groupsubject.go | 39 + .../limitedprioritylevelconfiguration.go | 66 + .../flowcontrol/v1beta3/limitresponse.go | 52 + 
.../v1beta3/nonresourcepolicyrule.go | 52 + .../v1beta3/policyruleswithsubjects.go | 72 + .../v1beta3/prioritylevelconfiguration.go | 256 + .../prioritylevelconfigurationcondition.go | 80 + .../prioritylevelconfigurationreference.go | 39 + .../v1beta3/prioritylevelconfigurationspec.go | 52 + .../prioritylevelconfigurationstatus.go | 44 + .../v1beta3/queuingconfiguration.go | 57 + .../flowcontrol/v1beta3/resourcepolicyrule.go | 83 + .../v1beta3/serviceaccountsubject.go | 48 + .../flowcontrol/v1beta3/subject.go | 70 + .../flowcontrol/v1beta3/usersubject.go | 39 + .../applyconfigurations/internal/internal.go | 1742 +++- .../applyconfigurations/meta/v1/listmeta.go | 66 - .../v1/ingressloadbalanceringress.go | 62 + .../v1/ingressloadbalancerstatus.go | 44 + .../networking/v1/ingressportstatus.go | 61 + .../networking/v1/ingressstatus.go | 8 +- .../v1alpha1/ipaddress.go} | 86 +- .../networking/v1alpha1/ipaddressspec.go | 39 + .../networking/v1alpha1/parentreference.go | 79 + .../v1beta1/ingressloadbalanceringress.go | 62 + .../v1beta1/ingressloadbalancerstatus.go | 44 + .../networking/v1beta1/ingressportstatus.go | 61 + .../networking/v1beta1/ingressstatus.go | 8 +- .../policy/v1/poddisruptionbudgetspec.go | 16 +- .../policy/v1beta1/poddisruptionbudgetspec.go | 16 +- .../resource/v1alpha2/allocationresult.go | 66 + .../resource/v1alpha2/podschedulingcontext.go | 258 + .../v1alpha2/podschedulingcontextspec.go | 50 + .../v1alpha2/podschedulingcontextstatus.go | 44 + .../resource/v1alpha2/resourceclaim.go | 258 + .../resourceclaimconsumerreference.go | 70 + .../resourceclaimparametersreference.go | 57 + .../v1alpha2/resourceclaimschedulingstatus.go | 50 + .../resource/v1alpha2/resourceclaimspec.go | 61 + .../resource/v1alpha2/resourceclaimstatus.go | 71 + .../v1alpha2/resourceclaimtemplate.go | 249 + .../v1alpha2/resourceclaimtemplatespec.go | 188 + .../resource/v1alpha2/resourceclass.go | 266 + .../resourceclassparametersreference.go | 66 + .../resource/v1alpha2/resourcehandle.go | 48 + .../discovery/aggregated_discovery.go | 154 + .../client-go/discovery/discovery_client.go | 270 +- .../client-go/discovery/fake/discovery.go | 10 +- vendor/k8s.io/client-go/dynamic/simple.go | 71 +- .../admissionregistration/interface.go | 8 + .../v1alpha1/interface.go | 52 + .../v1alpha1/validatingadmissionpolicy.go | 89 + .../validatingadmissionpolicybinding.go | 89 + .../informers/certificates/interface.go | 8 + .../v1alpha1/clustertrustbundle.go | 89 + .../certificates}/v1alpha1/interface.go | 14 +- vendor/k8s.io/client-go/informers/doc.go | 18 + .../informers/extensions/v1beta1/interface.go | 7 - vendor/k8s.io/client-go/informers/factory.go | 85 +- .../informers/flowcontrol/interface.go | 8 + .../v1beta3/flowschema.go} | 44 +- .../flowcontrol/v1beta3/interface.go | 52 + .../v1beta3/prioritylevelconfiguration.go | 89 + vendor/k8s.io/client-go/informers/generic.go | 40 +- .../networking/v1alpha1/interface.go | 7 + .../networking/v1alpha1/ipaddress.go | 89 + .../informers}/resource/interface.go | 16 +- .../informers/resource/v1alpha2/interface.go | 66 + .../resource/v1alpha2/podschedulingcontext.go | 90 + .../resource/v1alpha2/resourceclaim.go | 90 + .../v1alpha2/resourceclaimtemplate.go | 90 + .../resource/v1alpha2/resourceclass.go | 89 + .../k8s.io/client-go/kubernetes/clientset.go | 160 +- vendor/k8s.io/client-go/kubernetes/doc.go | 7 +- .../client-go/kubernetes/scheme/register.go | 10 + .../v1alpha1/admissionregistration_client.go | 112 + .../admissionregistration}/v1alpha1/doc.go | 2 +- 
.../v1alpha1/generated_expansion.go | 23 + .../v1alpha1/validatingadmissionpolicy.go | 243 + .../validatingadmissionpolicybinding.go | 197 + .../v1alpha1/authentication_client.go | 107 + .../typed/authentication/v1alpha1}/doc.go | 6 +- .../v1alpha1/generated_expansion.go | 4 +- .../v1alpha1/selfsubjectreview.go | 64 + .../v1beta1/authentication_client.go | 5 + .../v1beta1/generated_expansion.go | 2 + .../v1beta1/selfsubjectreview.go | 64 + .../v1alpha1/certificates_client.go} | 40 +- .../v1alpha1/clustertrustbundle.go | 197 + .../typed/certificates/v1alpha1/doc.go | 20 + .../v1alpha1/generated_expansion.go} | 7 +- .../typed/events/v1beta1/event_expansion.go | 3 +- .../extensions/v1beta1/extensions_client.go | 5 - .../extensions/v1beta1/generated_expansion.go | 2 - .../extensions/v1beta1/podsecuritypolicy.go | 197 - .../typed/flowcontrol/v1beta3/doc.go | 20 + .../flowcontrol/v1beta3/flowcontrol_client.go | 112 + .../typed/flowcontrol/v1beta3/flowschema.go | 243 + .../v1beta3/generated_expansion.go | 23 + .../v1beta3/prioritylevelconfiguration.go | 243 + .../v1alpha1/generated_expansion.go | 2 + .../typed/networking/v1alpha1/ipaddress.go | 197 + .../networking/v1alpha1/networking_client.go | 5 + .../kubernetes/typed/resource/v1alpha2/doc.go | 20 + .../resource/v1alpha2/generated_expansion.go | 27 + .../resource/v1alpha2/podschedulingcontext.go | 256 + .../resource/v1alpha2/resource_client.go | 122 + .../typed/resource/v1alpha2/resourceclaim.go | 256 + .../v1alpha2/resourceclaimtemplate.go | 208 + .../typed/resource/v1alpha2/resourceclass.go | 197 + .../v1alpha1/expansion_generated.go | 14 +- .../v1alpha1/validatingadmissionpolicy.go | 68 + .../validatingadmissionpolicybinding.go | 68 + .../v1alpha1/clustertrustbundle.go | 68 + .../v1alpha1/expansion_generated.go | 23 + .../extensions/v1beta1/expansion_generated.go | 4 - .../extensions/v1beta1/podsecuritypolicy.go | 68 - .../v1beta3/expansion_generated.go} | 25 +- .../listers/flowcontrol/v1beta3/flowschema.go | 68 + .../v1beta3/prioritylevelconfiguration.go | 68 + .../v1alpha1/expansion_generated.go | 4 + .../listers/networking/v1alpha1/ipaddress.go | 68 + .../resource/v1alpha2/expansion_generated.go | 47 + .../resource/v1alpha2/podschedulingcontext.go | 99 + .../resource/v1alpha2/resourceclaim.go | 99 + .../v1alpha2/resourceclaimtemplate.go | 99 + .../resource/v1alpha2/resourceclass.go | 68 + vendor/k8s.io/client-go/openapi/OWNERS | 4 + .../k8s.io/client-go/openapi/groupversion.go | 23 +- .../pkg/apis/clientauthentication/types.go | 5 + .../pkg/apis/clientauthentication/v1/types.go | 5 + .../v1/zz_generated.conversion.go | 2 + .../clientauthentication/v1beta1/types.go | 5 + .../v1beta1/zz_generated.conversion.go | 2 + vendor/k8s.io/client-go/pkg/version/base.go | 3 +- .../plugin/pkg/client/auth/exec/exec.go | 13 +- vendor/k8s.io/client-go/rest/client.go | 3 +- vendor/k8s.io/client-go/rest/config.go | 10 +- vendor/k8s.io/client-go/rest/exec.go | 4 +- vendor/k8s.io/client-go/rest/request.go | 98 +- vendor/k8s.io/client-go/rest/transport.go | 5 +- vendor/k8s.io/client-go/rest/with_retry.go | 38 +- vendor/k8s.io/client-go/testing/fixture.go | 2 +- .../k8s.io/client-go/tools/auth/clientauth.go | 5 +- .../client-go/tools/cache/controller.go | 92 +- .../client-go/tools/cache/delta_fifo.go | 142 +- .../client-go/tools/cache/expiration_cache.go | 2 - vendor/k8s.io/client-go/tools/cache/fifo.go | 14 +- .../k8s.io/client-go/tools/cache/listers.go | 20 +- .../k8s.io/client-go/tools/cache/reflector.go | 477 +- .../client-go/tools/cache/shared_informer.go 
| 304 +- .../client-go/tools/cache/synctrack/lazy.go | 83 + .../tools/cache/synctrack/synctrack.go | 120 + .../tools/cache/thread_safe_store.go | 303 +- .../client-go/tools/clientcmd/api/helpers.go | 91 +- .../client-go/tools/clientcmd/api/types.go | 5 + .../client-go/tools/clientcmd/api/v1/types.go | 5 + .../api/v1/zz_generated.conversion.go | 2 + .../client-go/tools/clientcmd/auth_loaders.go | 3 +- .../tools/clientcmd/client_config.go | 7 +- .../client-go/tools/clientcmd/loader.go | 9 +- .../client-go/tools/clientcmd/overrides.go | 42 +- .../tools/leaderelection/leaderelection.go | 15 +- .../resourcelock/configmaplock.go | 4 +- .../resourcelock/endpointslock.go | 4 +- .../leaderelection/resourcelock/interface.go | 2 +- .../leaderelection/resourcelock/leaselock.go | 12 +- .../k8s.io/client-go/tools/metrics/metrics.go | 17 + vendor/k8s.io/client-go/tools/pager/pager.go | 5 + vendor/k8s.io/client-go/tools/record/event.go | 53 +- vendor/k8s.io/client-go/tools/record/fake.go | 23 +- .../k8s.io/client-go/tools/reference/ref.go | 2 +- vendor/k8s.io/client-go/transport/cache.go | 16 +- .../k8s.io/client-go/transport/cache_go118.go | 24 + vendor/k8s.io/client-go/transport/config.go | 14 +- .../client-go/transport/token_source.go | 4 +- .../k8s.io/client-go/transport/transport.go | 28 +- vendor/k8s.io/client-go/util/cert/cert.go | 10 +- vendor/k8s.io/client-go/util/cert/io.go | 7 +- vendor/k8s.io/client-go/util/keyutil/key.go | 9 +- .../util/workqueue/delaying_queue.go | 61 +- .../client-go/util/workqueue/metrics.go | 9 +- .../k8s.io/client-go/util/workqueue/queue.go | 53 +- .../util/workqueue/rate_limiting_queue.go | 61 +- vendor/k8s.io/klog/v2/contextual.go | 30 +- vendor/k8s.io/klog/v2/format.go | 65 + .../k8s.io/klog/v2/internal/buffer/buffer.go | 75 +- .../klog/v2/internal/serialize/keyvalues.go | 232 +- vendor/k8s.io/klog/v2/k8s_references.go | 78 +- vendor/k8s.io/klog/v2/klog.go | 148 +- vendor/k8s.io/klog/v2/klogr.go | 12 +- .../k8s.io/kube-openapi/pkg/cached/cache.go | 281 + .../k8s.io/kube-openapi/pkg/common/common.go | 48 +- .../kube-openapi/pkg/handler3/handler.go | 245 +- .../k8s.io/kube-openapi/pkg/internal/flags.go | 4 + .../pkg/internal/handler/handler_cache.go | 57 - .../pkg/internal/serialization.go | 65 + .../go-json-experiment/json/arshal.go | 7 + .../go-json-experiment/json/arshal_any.go | 31 +- .../go-json-experiment/json/arshal_default.go | 147 +- .../go-json-experiment/json/arshal_inlined.go | 57 +- .../go-json-experiment/json/arshal_methods.go | 4 +- .../go-json-experiment/json/arshal_time.go | 99 +- .../go-json-experiment/json/decode.go | 12 +- .../go-json-experiment/json/doc.go | 9 +- .../go-json-experiment/json/encode.go | 24 + .../go-json-experiment/json/pools.go | 32 + .../go-json-experiment/json/state.go | 4 +- .../go-json-experiment/json/token.go | 10 +- .../go-json-experiment/json/value.go | 56 +- .../kube-openapi/pkg/schemaconv/openapi.go | 260 + .../pkg/schemaconv/proto_models.go | 178 + .../k8s.io/kube-openapi/pkg/schemaconv/smd.go | 306 +- .../k8s.io/kube-openapi/pkg/spec3/encoding.go | 22 +- .../k8s.io/kube-openapi/pkg/spec3/example.go | 25 +- .../pkg/spec3/external_documentation.go | 21 +- vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go | 254 + .../k8s.io/kube-openapi/pkg/spec3/header.go | 21 + .../kube-openapi/pkg/spec3/media_type.go | 22 +- .../kube-openapi/pkg/spec3/operation.go | 22 +- .../kube-openapi/pkg/spec3/parameter.go | 22 + vendor/k8s.io/kube-openapi/pkg/spec3/path.go | 82 +- .../kube-openapi/pkg/spec3/request_body.go | 23 +- 
.../k8s.io/kube-openapi/pkg/spec3/response.go | 121 +- .../pkg/spec3/security_requirement.go | 56 - .../kube-openapi/pkg/spec3/security_scheme.go | 2 +- .../k8s.io/kube-openapi/pkg/spec3/server.go | 41 +- vendor/k8s.io/kube-openapi/pkg/spec3/spec.go | 13 + .../pkg/util/proto/document_v3.go | 4 +- .../pkg/validation/spec/header.go | 23 +- .../kube-openapi/pkg/validation/spec/info.go | 33 +- .../kube-openapi/pkg/validation/spec/items.go | 53 +- .../pkg/validation/spec/operation.go | 36 +- .../pkg/validation/spec/parameter.go | 36 +- .../pkg/validation/spec/path_item.go | 28 +- .../kube-openapi/pkg/validation/spec/paths.go | 24 +- .../kube-openapi/pkg/validation/spec/ref.go | 18 +- .../pkg/validation/spec/response.go | 36 +- .../pkg/validation/spec/responses.go | 24 +- .../pkg/validation/spec/schema.go | 79 +- .../pkg/validation/spec/security_scheme.go | 20 +- .../pkg/validation/spec/swagger.go | 82 +- .../kube-openapi/pkg/validation/spec/tag.go | 19 +- vendor/k8s.io/utils/net/ipfamily.go | 181 + vendor/k8s.io/utils/net/net.go | 126 +- vendor/k8s.io/utils/net/port.go | 18 +- vendor/k8s.io/utils/pointer/OWNERS | 10 + vendor/k8s.io/utils/pointer/README.md | 3 + vendor/k8s.io/utils/pointer/pointer.go | 410 + vendor/k8s.io/utils/trace/trace.go | 49 +- vendor/knative.dev/pkg/apis/OWNERS | 14 +- vendor/knative.dev/pkg/apis/duck/OWNERS | 6 +- .../pkg/apis/duck/v1/addressable_types.go | 21 +- .../pkg/apis/duck/v1/destination.go | 11 + .../pkg/apis/duck/v1/knative_reference.go | 4 + .../pkg/apis/duck/v1/zz_generated.deepcopy.go | 29 +- vendor/knative.dev/pkg/apis/field_error.go | 7 +- vendor/knative.dev/pkg/changeset/commit.go | 5 +- .../client/injection/kube/client/client.go | 3703 ++++++- vendor/knative.dev/pkg/configmap/OWNERS | 6 - vendor/knative.dev/pkg/controller/OWNERS | 4 +- vendor/knative.dev/pkg/controller/options.go | 4 + vendor/knative.dev/pkg/hash/OWNERS | 7 - vendor/knative.dev/pkg/injection/OWNERS | 5 - .../knative.dev/pkg/injection/health_check.go | 109 + vendor/knative.dev/pkg/kmeta/OWNERS | 5 - vendor/knative.dev/pkg/kmeta/names.go | 17 +- vendor/knative.dev/pkg/logging/OWNERS | 6 - vendor/knative.dev/pkg/metrics/OWNERS | 10 - vendor/knative.dev/pkg/metrics/config.go | 47 +- .../pkg/metrics/config_observability.go | 71 + .../pkg/metrics/opencensus_exporter.go | 2 +- .../pkg/metrics/prometheus_exporter.go | 7 +- vendor/knative.dev/pkg/network/OWNERS | 5 - vendor/knative.dev/pkg/network/h2c.go | 7 +- vendor/knative.dev/pkg/reconciler/OWNERS | 4 +- vendor/modules.txt | 504 +- .../internal/golang/encoding/json/decode.go | 5 +- .../internal/golang/encoding/json/encode.go | 37 +- .../internal/golang/encoding/json/fold.go | 5 +- .../internal/golang/encoding/json/scanner.go | 2 + .../internal/golang/encoding/json/stream.go | 1 - vendor/sigs.k8s.io/json/json.go | 28 +- .../v4/merge/conflict.go | 121 + .../structured-merge-diff/v4/merge/update.go | 356 + 2388 files changed, 196666 insertions(+), 98102 deletions(-) delete mode 100644 vendor/cloud.google.com/go/kms/apiv1/gapic_metadata.json create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md rename vendor/github.com/{mattn/go-colorable/LICENSE => Azure/azure-sdk-for-go/sdk/azcore/LICENSE.txt} (93%) create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/README.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go create mode 100644 
vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go create mode 
100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/to.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/constants.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md rename vendor/github.com/{mitchellh/copystructure/LICENSE => Azure/azure-sdk-for-go/sdk/azidentity/LICENSE.txt} (86%) create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/logging.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/syncer.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go rename vendor/github.com/{mitchellh/go-testing-interface/LICENSE => Azure/azure-sdk-for-go/sdk/internal/LICENSE.txt} (86%) create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go create mode 100644 
vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/poller/util.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/CHANGELOG.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/LICENSE.txt create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/README.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/TROUBLESHOOTING.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/assets.json create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/autorest.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/build.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/ci.security.yml create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/constants.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/custom_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/models.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/models_serde.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/platform-matrix.json create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/response_types.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/test-resources-post.ps1 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/test-resources.json create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/time_unix.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/version.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/CHANGELOG.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/LICENSE.txt create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/README.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/challenge_policy.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/ci.securitykeyvault.yml create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/constants.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/parse.go delete mode 100644 
vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/auth/auth.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/CHANGELOG.md delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/client.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/dataplane_meta.json delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/enums.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/models.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/version.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/auth/README.md delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/to/convert.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/to/go_mod_tidy_hack.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/validation/error.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/validation/go_mod_tidy_hack.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/validation/validation.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/LICENSE create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache/cache.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/error_design.md create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/test_serialized_cache.json create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/design.md create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/mapslice.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/marshal.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/struct.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time/time.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go create mode 100644 
vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/apptype_string.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authorizetype_string.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/compress.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant/grant.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/ops.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/endpointtype_string.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/mex_document_definitions.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/saml_assertion_definitions.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/version_string.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_endpoint.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_mex_document.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/wstrust.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options/options.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared/shared.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go create mode 100644 vendor/github.com/Microsoft/go-winio/internal/fs/doc.go create mode 100644 vendor/github.com/Microsoft/go-winio/internal/fs/fs.go create mode 100644 vendor/github.com/Microsoft/go-winio/internal/fs/security.go create mode 100644 vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go create mode 100644 vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go delete mode 100644 vendor/github.com/armon/go-metrics/.gitignore delete mode 100644 vendor/github.com/armon/go-metrics/.travis.yml delete mode 100644 vendor/github.com/armon/go-metrics/LICENSE delete mode 100644 vendor/github.com/armon/go-metrics/README.md delete mode 100644 vendor/github.com/armon/go-metrics/const_unix.go delete mode 100644 
vendor/github.com/armon/go-metrics/const_windows.go delete mode 100644 vendor/github.com/armon/go-metrics/inmem.go delete mode 100644 vendor/github.com/armon/go-metrics/inmem_endpoint.go delete mode 100644 vendor/github.com/armon/go-metrics/inmem_signal.go delete mode 100644 vendor/github.com/armon/go-metrics/metrics.go delete mode 100644 vendor/github.com/armon/go-metrics/sink.go delete mode 100644 vendor/github.com/armon/go-metrics/start.go delete mode 100644 vendor/github.com/armon/go-metrics/statsd.go delete mode 100644 vendor/github.com/armon/go-metrics/statsite.go delete mode 100644 vendor/github.com/armon/go-radix/.gitignore delete mode 100644 vendor/github.com/armon/go-radix/.travis.yml delete mode 100644 vendor/github.com/armon/go-radix/LICENSE delete mode 100644 vendor/github.com/armon/go-radix/README.md delete mode 100644 vendor/github.com/armon/go-radix/radix.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/recursion_detection.go delete mode 100644 vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service_grpc.pb.go delete mode 100644 vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service_grpc.pb.go create mode 100644 vendor/github.com/cespare/xxhash/v2/testall.sh create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s rename vendor/github.com/cespare/xxhash/v2/{xxhash_amd64.go => xxhash_asm.go} (73%) create mode 100644 vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE create mode 100644 vendor/github.com/containerd/stargz-snapshotter/estargz/build.go create mode 100644 vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go create mode 100644 vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go create mode 100644 vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go create mode 100644 vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go create mode 100644 vendor/github.com/containerd/stargz-snapshotter/estargz/types.go delete mode 100644 vendor/github.com/fatih/color/LICENSE.md delete mode 100644 vendor/github.com/fatih/color/README.md delete mode 100644 vendor/github.com/fatih/color/color.go delete mode 100644 vendor/github.com/fatih/color/doc.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/.gitignore create mode 100644 vendor/github.com/go-jose/go-jose/v3/.golangci.yml create mode 100644 vendor/github.com/go-jose/go-jose/v3/.travis.yml create mode 100644 vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md create mode 100644 vendor/github.com/go-jose/go-jose/v3/CONTRIBUTING.md create mode 100644 vendor/github.com/go-jose/go-jose/v3/LICENSE create mode 100644 vendor/github.com/go-jose/go-jose/v3/README.md create mode 100644 vendor/github.com/go-jose/go-jose/v3/asymmetric.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/cipher/cbc_hmac.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/cipher/concat_kdf.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/cipher/ecdh_es.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/cipher/key_wrap.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/crypter.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/doc.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/encoding.go rename vendor/github.com/{golang/snappy => go-jose/go-jose/v3/json}/LICENSE (95%) create mode 100644 vendor/github.com/go-jose/go-jose/v3/json/README.md create mode 100644 
vendor/github.com/go-jose/go-jose/v3/json/decode.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/json/encode.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/json/indent.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/json/scanner.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/json/stream.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/json/tags.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/jwe.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/jwk.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/jws.go rename vendor/{gopkg.in/square/go-jose.v2 => github.com/go-jose/go-jose/v3}/jwt/builder.go (97%) rename vendor/{gopkg.in/square/go-jose.v2 => github.com/go-jose/go-jose/v3}/jwt/claims.go (91%) rename vendor/{gopkg.in/square/go-jose.v2 => github.com/go-jose/go-jose/v3}/jwt/doc.go (100%) rename vendor/{gopkg.in/square/go-jose.v2 => github.com/go-jose/go-jose/v3}/jwt/errors.go (54%) rename vendor/{gopkg.in/square/go-jose.v2 => github.com/go-jose/go-jose/v3}/jwt/jwt.go (80%) rename vendor/{gopkg.in/square/go-jose.v2 => github.com/go-jose/go-jose/v3}/jwt/validation.go (79%) create mode 100644 vendor/github.com/go-jose/go-jose/v3/opaque.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/shared.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/signing.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/symmetric.go delete mode 100644 vendor/github.com/go-openapi/jsonpointer/.travis.yml delete mode 100644 vendor/github.com/go-openapi/jsonreference/.travis.yml delete mode 100644 vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go delete mode 100644 vendor/github.com/golang/snappy/.gitignore delete mode 100644 vendor/github.com/golang/snappy/AUTHORS delete mode 100644 vendor/github.com/golang/snappy/CONTRIBUTORS delete mode 100644 vendor/github.com/golang/snappy/README delete mode 100644 vendor/github.com/golang/snappy/decode.go delete mode 100644 vendor/github.com/golang/snappy/decode_amd64.s delete mode 100644 vendor/github.com/golang/snappy/decode_arm64.s delete mode 100644 vendor/github.com/golang/snappy/decode_asm.go delete mode 100644 vendor/github.com/golang/snappy/decode_other.go delete mode 100644 vendor/github.com/golang/snappy/encode.go delete mode 100644 vendor/github.com/golang/snappy/encode_amd64.s delete mode 100644 vendor/github.com/golang/snappy/encode_arm64.s delete mode 100644 vendor/github.com/golang/snappy/encode_asm.go delete mode 100644 vendor/github.com/golang/snappy/encode_other.go delete mode 100644 vendor/github.com/golang/snappy/snappy.go create mode 100644 vendor/github.com/google/go-containerregistry/internal/estargz/estargz.go create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/empty/README.md create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/empty/doc.go create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/empty/image.go create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/empty/index.go create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/mutate/README.md create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/mutate/doc.go create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/mutate/image.go create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/mutate/index.go create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go create mode 100644 
vendor/github.com/google/go-containerregistry/pkg/v1/mutate/rebase.go create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/remote/fetcher.go create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/remote/puller.go create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/remote/pusher.go create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/remote/referrers.go create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/remote/schema1.go create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/tarball/README.md create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/tarball/doc.go create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/tarball/layer.go create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write.go create mode 100644 vendor/github.com/google/s2a-go/.gitignore create mode 100644 vendor/github.com/google/s2a-go/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/google/s2a-go/CONTRIBUTING.md create mode 100644 vendor/github.com/google/s2a-go/LICENSE.md create mode 100644 vendor/github.com/google/s2a-go/README.md create mode 100644 vendor/github.com/google/s2a-go/fallback/s2a_fallback.go create mode 100644 vendor/github.com/google/s2a-go/internal/authinfo/authinfo.go create mode 100644 vendor/github.com/google/s2a-go/internal/handshaker/handshaker.go create mode 100644 vendor/github.com/google/s2a-go/internal/handshaker/service/service.go create mode 100644 vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go create mode 100644 vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go create mode 100644 vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go create mode 100644 vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go create mode 100644 vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go create mode 100644 vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go create mode 100644 vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go create mode 100644 vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go create mode 100644 vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aeadcrypter.go create mode 100644 vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aesgcm.go create mode 100644 vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/chachapoly.go create mode 100644 vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/common.go create mode 100644 vendor/github.com/google/s2a-go/internal/record/internal/halfconn/ciphersuite.go create mode 100644 vendor/github.com/google/s2a-go/internal/record/internal/halfconn/counter.go create mode 100644 vendor/github.com/google/s2a-go/internal/record/internal/halfconn/expander.go create mode 100644 vendor/github.com/google/s2a-go/internal/record/internal/halfconn/halfconn.go create mode 100644 vendor/github.com/google/s2a-go/internal/record/record.go create mode 100644 vendor/github.com/google/s2a-go/internal/record/ticketsender.go create mode 100644 vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go create mode 100644 vendor/github.com/google/s2a-go/internal/v2/README.md create mode 100644 
vendor/github.com/google/s2a-go/internal/v2/certverifier/certverifier.go create mode 100644 vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der create mode 100644 vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der create mode 100644 vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der create mode 100644 vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der create mode 100644 vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der create mode 100644 vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der create mode 100644 vendor/github.com/google/s2a-go/internal/v2/remotesigner/remotesigner.go create mode 100644 vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.der create mode 100644 vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem create mode 100644 vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem create mode 100644 vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der create mode 100644 vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.pem create mode 100644 vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_key.pem create mode 100644 vendor/github.com/google/s2a-go/internal/v2/s2av2.go create mode 100644 vendor/github.com/google/s2a-go/internal/v2/testdata/client_cert.pem create mode 100644 vendor/github.com/google/s2a-go/internal/v2/testdata/client_key.pem create mode 100644 vendor/github.com/google/s2a-go/internal/v2/testdata/server_cert.pem create mode 100644 vendor/github.com/google/s2a-go/internal/v2/testdata/server_key.pem create mode 100644 vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_cert.pem create mode 100644 vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_key.pem create mode 100644 vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_cert.pem create mode 100644 vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_key.pem create mode 100644 vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go create mode 100644 vendor/github.com/google/s2a-go/s2a.go create mode 100644 vendor/github.com/google/s2a-go/s2a_options.go create mode 100644 vendor/github.com/google/s2a-go/s2a_utils.go create mode 100644 vendor/github.com/google/s2a-go/stream/s2a_stream.go create mode 100644 vendor/github.com/google/s2a-go/testdata/client_cert.pem create mode 100644 vendor/github.com/google/s2a-go/testdata/client_key.pem create mode 100644 vendor/github.com/google/s2a-go/testdata/server_cert.pem create mode 100644 vendor/github.com/google/s2a-go/testdata/server_key.pem create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE.txt create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go create mode 100644 
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/doc.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/doc.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go delete mode 100644 vendor/github.com/hashicorp/go-hclog/.gitignore delete mode 100644 vendor/github.com/hashicorp/go-hclog/README.md delete mode 100644 vendor/github.com/hashicorp/go-hclog/colorize_unix.go delete mode 100644 vendor/github.com/hashicorp/go-hclog/colorize_windows.go delete mode 100644 vendor/github.com/hashicorp/go-hclog/context.go delete mode 100644 vendor/github.com/hashicorp/go-hclog/exclude.go delete mode 100644 vendor/github.com/hashicorp/go-hclog/global.go delete mode 100644 vendor/github.com/hashicorp/go-hclog/interceptlogger.go delete mode 100644 vendor/github.com/hashicorp/go-hclog/intlogger.go delete mode 100644 vendor/github.com/hashicorp/go-hclog/logger.go delete mode 100644 vendor/github.com/hashicorp/go-hclog/nulllogger.go delete mode 100644 vendor/github.com/hashicorp/go-hclog/stacktrace.go delete mode 100644 vendor/github.com/hashicorp/go-hclog/stdlog.go delete mode 100644 vendor/github.com/hashicorp/go-hclog/writer.go delete mode 100644 vendor/github.com/hashicorp/go-immutable-radix/.gitignore delete mode 100644 vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md delete mode 100644 vendor/github.com/hashicorp/go-immutable-radix/LICENSE delete mode 100644 vendor/github.com/hashicorp/go-immutable-radix/README.md delete mode 100644 vendor/github.com/hashicorp/go-immutable-radix/edges.go delete mode 100644 vendor/github.com/hashicorp/go-immutable-radix/iradix.go delete mode 100644 vendor/github.com/hashicorp/go-immutable-radix/iter.go delete mode 
100644 vendor/github.com/hashicorp/go-immutable-radix/node.go delete mode 100644 vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go delete mode 100644 vendor/github.com/hashicorp/go-immutable-radix/reverse_iter.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/.gitignore delete mode 100644 vendor/github.com/hashicorp/go-plugin/CHANGELOG.md delete mode 100644 vendor/github.com/hashicorp/go-plugin/LICENSE delete mode 100644 vendor/github.com/hashicorp/go-plugin/README.md delete mode 100644 vendor/github.com/hashicorp/go-plugin/client.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/discover.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/error.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/grpc_broker.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/grpc_client.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/grpc_controller.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/grpc_server.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/grpc_stdio.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto delete mode 100644 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto delete mode 100644 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto delete mode 100644 vendor/github.com/hashicorp/go-plugin/log_entry.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/mtls.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/mux_broker.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/plugin.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/process.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/process_posix.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/process_windows.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/protocol.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/rpc_client.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/rpc_server.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/server.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/server_mux.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/stream.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/testing.go delete mode 100644 vendor/github.com/hashicorp/go-secure-stdlib/mlock/LICENSE delete mode 100644 vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock.go delete mode 100644 vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock_unavail.go delete mode 100644 vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock_unix.go delete mode 100644 vendor/github.com/hashicorp/go-uuid/.travis.yml delete mode 100644 vendor/github.com/hashicorp/go-uuid/LICENSE delete mode 100644 vendor/github.com/hashicorp/go-uuid/README.md delete mode 100644 vendor/github.com/hashicorp/go-uuid/uuid.go delete mode 100644 vendor/github.com/hashicorp/go-version/CHANGELOG.md delete mode 100644 vendor/github.com/hashicorp/go-version/LICENSE delete mode 100644 vendor/github.com/hashicorp/go-version/README.md delete mode 100644 vendor/github.com/hashicorp/go-version/constraint.go delete mode 100644 
vendor/github.com/hashicorp/go-version/version.go delete mode 100644 vendor/github.com/hashicorp/go-version/version_collection.go rename vendor/github.com/hashicorp/vault/{sdk/helper/consts => api}/plugin_types.go (84%) delete mode 100644 vendor/github.com/hashicorp/vault/sdk/LICENSE delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/certutil/helpers.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/certutil/types.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/compressutil/compress.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/consts/agent.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/consts/consts.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/consts/deprecation_status.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/consts/error.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/consts/replication.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/consts/token_consts.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/cryptoutil/cryptoutil.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/errutil/error.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/hclutil/hcl.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/jsonutil/json.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/license/feature.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/locksutil/locks.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/logging/logging.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/pathmanager/pathmanager.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/env.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing.pb.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing.proto delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing_grpc.pb.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/run_config.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/runner.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/tls.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/strutil/strutil.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/wrapping/wrapinfo.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/audit.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/auth.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/connection.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/controlgroup.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/error.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/identity.pb.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/identity.proto delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/lease.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/logical.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/logical_storage.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/managed_key.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/plugin.pb.go delete mode 100644 
vendor/github.com/hashicorp/vault/sdk/logical/plugin.proto delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/request.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/response.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/response_util.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/secret.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/storage.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/storage_inmem.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/storage_view.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/system_view.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/testing.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/token.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/translate_response.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/version.pb.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/version.proto delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/version_grpc.pb.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/cache.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/encoding.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/entry.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/error.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem_ha.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/latency.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/physical.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/physical_access.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/physical_view.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/testing.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/transactions.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/version/cgo.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/version/version.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/version/version_base.go delete mode 100644 vendor/github.com/hashicorp/yamux/.gitignore delete mode 100644 vendor/github.com/hashicorp/yamux/LICENSE delete mode 100644 vendor/github.com/hashicorp/yamux/README.md delete mode 100644 vendor/github.com/hashicorp/yamux/addr.go delete mode 100644 vendor/github.com/hashicorp/yamux/const.go delete mode 100644 vendor/github.com/hashicorp/yamux/mux.go delete mode 100644 vendor/github.com/hashicorp/yamux/session.go delete mode 100644 vendor/github.com/hashicorp/yamux/spec.md delete mode 100644 vendor/github.com/hashicorp/yamux/stream.go delete mode 100644 vendor/github.com/hashicorp/yamux/util.go delete mode 100644 vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go delete mode 100644 vendor/github.com/jellydator/ttlcache/v2/Readme.md delete mode 100644 vendor/github.com/jellydator/ttlcache/v2/cache.go delete mode 100644 vendor/github.com/jellydator/ttlcache/v2/evictionreason_enumer.go delete mode 100644 vendor/github.com/jellydator/ttlcache/v2/item.go delete mode 100644 vendor/github.com/jellydator/ttlcache/v2/metrics.go delete mode 100644 vendor/github.com/jellydator/ttlcache/v2/priority_queue.go rename vendor/github.com/jellydator/ttlcache/{v2 => 
v3}/CHANGELOG.md (100%) rename vendor/github.com/jellydator/ttlcache/{v2 => v3}/LICENSE (100%) create mode 100644 vendor/github.com/jellydator/ttlcache/v3/README.md create mode 100644 vendor/github.com/jellydator/ttlcache/v3/cache.go create mode 100644 vendor/github.com/jellydator/ttlcache/v3/expiration_queue.go create mode 100644 vendor/github.com/jellydator/ttlcache/v3/item.go create mode 100644 vendor/github.com/jellydator/ttlcache/v3/metrics.go create mode 100644 vendor/github.com/jellydator/ttlcache/v3/options.go create mode 100644 vendor/github.com/kylelemons/godebug/LICENSE create mode 100644 vendor/github.com/kylelemons/godebug/diff/diff.go create mode 100644 vendor/github.com/kylelemons/godebug/pretty/.gitignore create mode 100644 vendor/github.com/kylelemons/godebug/pretty/doc.go create mode 100644 vendor/github.com/kylelemons/godebug/pretty/public.go create mode 100644 vendor/github.com/kylelemons/godebug/pretty/reflect.go create mode 100644 vendor/github.com/kylelemons/godebug/pretty/structure.go delete mode 100644 vendor/github.com/mattn/go-colorable/README.md delete mode 100644 vendor/github.com/mattn/go-colorable/colorable_appengine.go delete mode 100644 vendor/github.com/mattn/go-colorable/colorable_others.go delete mode 100644 vendor/github.com/mattn/go-colorable/colorable_windows.go delete mode 100644 vendor/github.com/mattn/go-colorable/go.test.sh delete mode 100644 vendor/github.com/mattn/go-colorable/noncolorable.go delete mode 100644 vendor/github.com/mattn/go-isatty/LICENSE delete mode 100644 vendor/github.com/mattn/go-isatty/README.md delete mode 100644 vendor/github.com/mattn/go-isatty/doc.go delete mode 100644 vendor/github.com/mattn/go-isatty/go.test.sh delete mode 100644 vendor/github.com/mattn/go-isatty/isatty_bsd.go delete mode 100644 vendor/github.com/mattn/go-isatty/isatty_others.go delete mode 100644 vendor/github.com/mattn/go-isatty/isatty_plan9.go delete mode 100644 vendor/github.com/mattn/go-isatty/isatty_solaris.go delete mode 100644 vendor/github.com/mattn/go-isatty/isatty_tcgets.go delete mode 100644 vendor/github.com/mattn/go-isatty/isatty_windows.go delete mode 100644 vendor/github.com/mitchellh/copystructure/README.md delete mode 100644 vendor/github.com/mitchellh/copystructure/copier_time.go delete mode 100644 vendor/github.com/mitchellh/copystructure/copystructure.go delete mode 100644 vendor/github.com/mitchellh/go-testing-interface/.travis.yml delete mode 100644 vendor/github.com/mitchellh/go-testing-interface/README.md delete mode 100644 vendor/github.com/mitchellh/go-testing-interface/testing.go delete mode 100644 vendor/github.com/mitchellh/reflectwalk/.travis.yml delete mode 100644 vendor/github.com/mitchellh/reflectwalk/LICENSE delete mode 100644 vendor/github.com/mitchellh/reflectwalk/README.md delete mode 100644 vendor/github.com/mitchellh/reflectwalk/location.go delete mode 100644 vendor/github.com/mitchellh/reflectwalk/location_string.go delete mode 100644 vendor/github.com/mitchellh/reflectwalk/reflectwalk.go delete mode 100644 vendor/github.com/oklog/run/.gitignore delete mode 100644 vendor/github.com/oklog/run/README.md delete mode 100644 vendor/github.com/oklog/run/actors.go delete mode 100644 vendor/github.com/oklog/run/group.go delete mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go delete mode 100644 vendor/github.com/pierrec/lz4/.gitignore delete mode 100644 vendor/github.com/pierrec/lz4/.travis.yml delete mode 100644 vendor/github.com/pierrec/lz4/README.md delete mode 100644 
vendor/github.com/pierrec/lz4/block.go delete mode 100644 vendor/github.com/pierrec/lz4/debug.go delete mode 100644 vendor/github.com/pierrec/lz4/debug_stub.go delete mode 100644 vendor/github.com/pierrec/lz4/decode_amd64.go delete mode 100644 vendor/github.com/pierrec/lz4/decode_amd64.s delete mode 100644 vendor/github.com/pierrec/lz4/decode_other.go delete mode 100644 vendor/github.com/pierrec/lz4/errors.go delete mode 100644 vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go delete mode 100644 vendor/github.com/pierrec/lz4/lz4.go delete mode 100644 vendor/github.com/pierrec/lz4/lz4_go1.10.go delete mode 100644 vendor/github.com/pierrec/lz4/lz4_notgo1.10.go delete mode 100644 vendor/github.com/pierrec/lz4/reader.go delete mode 100644 vendor/github.com/pierrec/lz4/reader_legacy.go delete mode 100644 vendor/github.com/pierrec/lz4/writer.go delete mode 100644 vendor/github.com/pierrec/lz4/writer_legacy.go rename vendor/github.com/{pierrec/lz4 => pkg/browser}/LICENSE (85%) create mode 100644 vendor/github.com/pkg/browser/README.md create mode 100644 vendor/github.com/pkg/browser/browser.go create mode 100644 vendor/github.com/pkg/browser/browser_darwin.go create mode 100644 vendor/github.com/pkg/browser/browser_freebsd.go create mode 100644 vendor/github.com/pkg/browser/browser_linux.go create mode 100644 vendor/github.com/pkg/browser/browser_netbsd.go create mode 100644 vendor/github.com/pkg/browser/browser_openbsd.go create mode 100644 vendor/github.com/pkg/browser/browser_unsupported.go create mode 100644 vendor/github.com/pkg/browser/browser_windows.go create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/kms/aws/LICENSE create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/LICENSE create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/kms/gcp/LICENSE create mode 100644 vendor/github.com/sigstore/sigstore/pkg/signature/kms/hashivault/LICENSE rename vendor/github.com/{oklog/run => spiffe/go-spiffe/v2}/LICENSE (100%) create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/bundle/jwtbundle/bundle.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/bundle/jwtbundle/doc.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/bundle/jwtbundle/set.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/bundle/jwtbundle/source.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/bundle/spiffebundle/bundle.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/bundle/spiffebundle/doc.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/bundle/spiffebundle/set.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/bundle/spiffebundle/source.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/bundle/x509bundle/bundle.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/bundle/x509bundle/doc.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/bundle/x509bundle/set.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/bundle/x509bundle/source.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/internal/cryptoutil/keys.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/internal/jwtutil/util.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/internal/pemutil/pem.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/internal/x509util/util.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/logger/logger.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/logger/null.go create mode 100644 
vendor/github.com/spiffe/go-spiffe/v2/logger/std.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/logger/writer.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/proto/spiffe/workload/workload.pb.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/proto/spiffe/workload/workload.proto create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/proto/spiffe/workload/workload_grpc.pb.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/spiffeid/charset_backcompat_allow.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/spiffeid/charset_backcompat_deny.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/spiffeid/errors.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/spiffeid/id.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/spiffeid/match.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/spiffeid/path.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/spiffeid/require.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/spiffeid/trustdomain.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/spiffetls/tlsconfig/authorizer.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/spiffetls/tlsconfig/config.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/spiffetls/tlsconfig/trace.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/svid/jwtsvid/source.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/svid/jwtsvid/svid.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/svid/x509svid/source.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/svid/x509svid/svid.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/svid/x509svid/verify.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/workloadapi/addr.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/workloadapi/addr_posix.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/workloadapi/addr_windows.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/workloadapi/backoff.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/workloadapi/bundlesource.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client_posix.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client_windows.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/workloadapi/convenience.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/workloadapi/jwtsource.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/workloadapi/option.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/workloadapi/option_windows.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/workloadapi/watcher.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/workloadapi/x509context.go create mode 100644 vendor/github.com/spiffe/go-spiffe/v2/workloadapi/x509source.go rename vendor/github.com/{Azure/go-autorest/autorest/to => spiffe/spire-api-sdk}/LICENSE (94%) create mode 100644 vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1/entry.pb.go create mode 100644 vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1/entry.proto create mode 100644 vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1/entry_grpc.pb.go create mode 100644 vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/agent.pb.go create mode 100644 vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/agent.proto create 
mode 100644 vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/attestation.pb.go create mode 100644 vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/attestation.proto create mode 100644 vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/bundle.pb.go create mode 100644 vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/bundle.proto create mode 100644 vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/entry.pb.go create mode 100644 vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/entry.proto create mode 100644 vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/federateswith.pb.go create mode 100644 vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/federateswith.proto create mode 100644 vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/federationrelationship.pb.go create mode 100644 vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/federationrelationship.proto create mode 100644 vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/jointoken.pb.go create mode 100644 vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/jointoken.proto create mode 100644 vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/jwtsvid.pb.go create mode 100644 vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/jwtsvid.proto create mode 100644 vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/selector.pb.go create mode 100644 vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/selector.proto create mode 100644 vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/spiffeid.pb.go create mode 100644 vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/spiffeid.proto create mode 100644 vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/status.pb.go create mode 100644 vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/status.proto create mode 100644 vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/x509svid.pb.go create mode 100644 vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/x509svid.proto delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/config/artifact_bucket.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/config/artifact_pvc.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/config/contexts.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/config/events.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/config/trusted_resources.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/constant.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/matrix_types.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/matrix_types.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resource_paths.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resource_types_validation.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/resource/resource.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/git/git_resource.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/image/image_resource.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/pipelineresource_validation.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/storage/artifact_bucket.go delete 
mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/storage/artifact_pvc.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/storage/gcs.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/storage/secret.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/storage/storage.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/artifacts/artifacts_storage.go rename vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/{v1beta1 => v1}/taskrun/fake/fake.go (94%) rename vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/{v1beta1 => v1}/taskrun/taskrun.go (71%) rename vendor/github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/{v1beta1 => v1}/taskrun/controller.go (96%) rename vendor/github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/{v1beta1 => v1}/taskrun/reconciler.go (84%) rename vendor/github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/{v1beta1 => v1}/taskrun/state.go (95%) delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned/clientset.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned/scheme/register.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned/typed/resource/v1alpha1/pipelineresource.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/resource/informers/externalversions/factory.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/resource/informers/externalversions/generic.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/resource/informers/externalversions/internalinterfaces/factory_interfaces.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/resource/informers/externalversions/resource/v1alpha1/pipelineresource.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/resource/injection/client/client.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/resource/injection/informers/factory/factory.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/resource/injection/informers/resource/v1alpha1/pipelineresource/pipelineresource.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/resource/listers/resource/v1alpha1/pipelineresource.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/internal/affinityassistant/affinityassistant_types.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/reconciler/constant.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/reconciler/pipelinerun/resources/input_output_steps.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources/image_exporter.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources/input_resources.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources/output_resource.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources/taskresourceresolution.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources/validate_params.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources/volume.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/reconciler/taskrun/validate_resources.go create mode 100644 
vendor/github.com/tektoncd/pipeline/pkg/reconciler/taskrun/validate_taskrun.go delete mode 100644 vendor/github.com/tektoncd/pipeline/pkg/remote/oci/resolver.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/resolution/common/interface.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/result/result.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/spire/controller.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/spire/entrypointer.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/spire/sign.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/spire/spire.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/spire/spire_mock.go create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/spire/verify.go create mode 100644 vendor/github.com/vbatts/tar-split/LICENSE create mode 100644 vendor/github.com/vbatts/tar-split/archive/tar/common.go create mode 100644 vendor/github.com/vbatts/tar-split/archive/tar/format.go create mode 100644 vendor/github.com/vbatts/tar-split/archive/tar/reader.go create mode 100644 vendor/github.com/vbatts/tar-split/archive/tar/stat_actime1.go create mode 100644 vendor/github.com/vbatts/tar-split/archive/tar/stat_actime2.go create mode 100644 vendor/github.com/vbatts/tar-split/archive/tar/stat_unix.go create mode 100644 vendor/github.com/vbatts/tar-split/archive/tar/strconv.go create mode 100644 vendor/github.com/vbatts/tar-split/archive/tar/writer.go create mode 100644 vendor/github.com/zeebo/errs/.gitignore create mode 100644 vendor/github.com/zeebo/errs/AUTHORS rename vendor/github.com/{hashicorp/go-hclog => zeebo/errs}/LICENSE (97%) create mode 100644 vendor/github.com/zeebo/errs/README.md create mode 100644 vendor/github.com/zeebo/errs/errs.go create mode 100644 vendor/github.com/zeebo/errs/group.go create mode 100644 vendor/go.opentelemetry.io/otel/.codespellignore create mode 100644 vendor/go.opentelemetry.io/otel/.codespellrc create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/handler.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/instruments.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/meter.go create mode 100644 vendor/go.opentelemetry.io/otel/metric.go rename vendor/{github.com/Azure/go-autorest/autorest/validation => go.opentelemetry.io/otel/metric}/LICENSE (94%) create mode 100644 vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/asyncint64.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/config.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/meter.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/syncfloat64.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/syncint64.go create mode 100644 vendor/go.opentelemetry.io/otel/requirements.txt create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go create mode 100644 
vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/version.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/version.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b.go delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_generic.go delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_ref.go delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2x.go delete mode 100644 vendor/golang.org/x/crypto/blake2b/register.go create mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go create mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go create mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s create mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go create mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go create mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go create mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519_compat.go create mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519_go120.go create mode 100644 vendor/golang.org/x/crypto/hkdf/hkdf.go create mode 100644 vendor/golang.org/x/exp/LICENSE create mode 100644 vendor/golang.org/x/exp/PATENTS create mode 100644 vendor/golang.org/x/exp/maps/maps.go create mode 100644 vendor/golang.org/x/net/http2/writesched_roundrobin.go create mode 100644 vendor/golang.org/x/sync/errgroup/go120.go create mode 100644 vendor/golang.org/x/sync/errgroup/pre_go120.go create mode 100644 vendor/golang.org/x/sys/cpu/runtime_auxv.go create mode 100644 vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go create mode 100644 vendor/golang.org/x/sys/unix/ioctl_signed.go rename vendor/golang.org/x/sys/unix/{ioctl.go => ioctl_unsigned.go} (76%) create mode 100644 vendor/golang.org/x/tools/go/types/objectpath/objectpath.go create mode 100644 vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go create mode 100644 vendor/google.golang.org/api/internal/cba.go rename vendor/google.golang.org/api/{transport => internal}/cert/default_cert.go (100%) rename vendor/google.golang.org/api/{transport => internal}/cert/enterprise_cert.go (93%) rename vendor/google.golang.org/api/{transport => internal}/cert/secureconnect_cert.go (98%) create mode 100644 vendor/google.golang.org/api/internal/s2a.go delete mode 100644 vendor/google.golang.org/api/transport/http/configure_http2_go116.go delete mode 100644 vendor/google.golang.org/api/transport/http/configure_http2_not_go116.go delete mode 100644 vendor/google.golang.org/api/transport/internal/dca/dca.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/LICENSE create mode 100644 vendor/google.golang.org/genproto/googleapis/api/tidyfix.go delete mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/kms/v1/alias.go delete mode 100644 
vendor/google.golang.org/genproto/googleapis/iam/v1/alias.go create mode 100644 vendor/google.golang.org/genproto/googleapis/rpc/LICENSE create mode 100644 vendor/google.golang.org/genproto/internal/doc.go create mode 100644 vendor/google.golang.org/grpc/idle.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go create mode 100644 vendor/google.golang.org/grpc/internal/serviceconfig/duration.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/logging.go delete mode 100644 vendor/google.golang.org/grpc/reflection/README.md delete mode 100644 vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go delete mode 100644 vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto delete mode 100644 vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go delete mode 100644 vendor/google.golang.org/grpc/reflection/serverreflection.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go create mode 100644 vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/register.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/apidiscovery/v2beta1/doc.go create mode 100644 vendor/k8s.io/api/apidiscovery/v2beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/apidiscovery/v2beta1/generated.proto create mode 100644 vendor/k8s.io/api/apidiscovery/v2beta1/register.go create mode 100644 vendor/k8s.io/api/apidiscovery/v2beta1/types.go create mode 100644 vendor/k8s.io/api/apidiscovery/v2beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/apidiscovery/v2beta1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/authentication/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/authentication/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/authentication/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/authentication/v1alpha1/register.go create mode 100644 vendor/k8s.io/api/authentication/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/authentication/v1alpha1/types_swagger_doc_generated.go rename vendor/{github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/storage => k8s.io/api/authentication/v1alpha1}/zz_generated.deepcopy.go (56%) create mode 100644 vendor/k8s.io/api/authentication/v1alpha1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/certificates/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/certificates/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/certificates/v1alpha1/register.go create mode 100644 vendor/k8s.io/api/certificates/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go create mode 100644 
vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go rename vendor/{github.com/tektoncd/pipeline/pkg/apis/pipeline/storagetypes.go => k8s.io/api/flowcontrol/v1beta3/doc.go} (59%) create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta3/generated.pb.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta3/generated.proto create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta3/register.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta3/types.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta3/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta3/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta3/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/networking/v1alpha1/well_known_labels.go create mode 100644 vendor/k8s.io/api/resource/v1alpha2/doc.go create mode 100644 vendor/k8s.io/api/resource/v1alpha2/generated.pb.go create mode 100644 vendor/k8s.io/api/resource/v1alpha2/generated.proto create mode 100644 vendor/k8s.io/api/resource/v1alpha2/register.go create mode 100644 vendor/k8s.io/api/resource/v1alpha2/types.go create mode 100644 vendor/k8s.io/api/resource/v1alpha2/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/resource/v1alpha2/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/defaults.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/endpoints.yaml create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/atmostevery.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/buildmanagerinfo.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/capmanagers.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/conflict.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fieldmanager.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fields.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastapplied.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedmanager.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedupdater.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfields.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfieldsupdater.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/manager.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/pathelement.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/skipnonapplied.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/stripmeta.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versionconverter.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/node.yaml create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/pod.yaml create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/scalehandler.go create mode 100644 
vendor/k8s.io/apimachinery/pkg/util/managedfields/typeconverter.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/ordered.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/set.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/wait/delay.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/wait/error.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/wait/loop.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/wait/poll.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/wait/timer.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchcondition.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/auditannotation.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/expressionwarning.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchcondition.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchresources.go rename vendor/k8s.io/client-go/applyconfigurations/admissionregistration/{v1beta1/rulewithoperations.go => v1alpha1/namedrulewithoperations.go} (55%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramkind.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/typechecking.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicystatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchcondition.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rule.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetordinals.go rename vendor/k8s.io/client-go/applyconfigurations/{extensions/v1beta1/allowedflexvolume.go => apps/v1beta1/statefulsetordinals.go} (50%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetordinals.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundlespec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/claimsource.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go rename vendor/k8s.io/client-go/applyconfigurations/{autoscaling/v2/podresourcemetricsource.go => core/v1/podresourceclaim.go} (51%) create mode 100644 
vendor/k8s.io/client-go/applyconfigurations/core/v1/podschedulinggate.go rename vendor/k8s.io/client-go/applyconfigurations/{extensions/v1beta1/allowedcsidriver.go => core/v1/resourceclaim.go} (65%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/typedobjectreference.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedhostpath.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/fsgroupstrategyoptions.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/hostportrange.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/idrange.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalanceringress.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalancerstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressportstatus.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicyspec.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasgroupstrategyoptions.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasuserstrategyoptions.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runtimeclassstrategyoptions.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/selinuxstrategyoptions.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/supplementalgroupsstrategyoptions.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowdistinguishermethod.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschema.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemacondition.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemaspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemastatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/groupsubject.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitedprioritylevelconfiguration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitresponse.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/nonresourcepolicyrule.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/policyruleswithsubjects.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfiguration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationcondition.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationreference.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/queuingconfiguration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/resourcepolicyrule.go create 
mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/serviceaccountsubject.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/subject.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/usersubject.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/meta/v1/listmeta.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalanceringress.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalancerstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressportstatus.go rename vendor/k8s.io/client-go/applyconfigurations/{extensions/v1beta1/podsecuritypolicy.go => networking/v1alpha1/ipaddress.go} (66%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddressspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalanceringress.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalancerstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressportstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresult.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontext.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaim.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimconsumerreference.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparametersreference.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimschedulingstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplate.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplatespec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclass.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclassparametersreference.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcehandle.go create mode 100644 vendor/k8s.io/client-go/discovery/aggregated_discovery.go create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicy.go create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go create mode 100644 vendor/k8s.io/client-go/informers/certificates/v1alpha1/clustertrustbundle.go rename 
vendor/{github.com/tektoncd/pipeline/pkg/client/resource/informers/externalversions/resource => k8s.io/client-go/informers/certificates}/v1alpha1/interface.go (69%) create mode 100644 vendor/k8s.io/client-go/informers/doc.go rename vendor/k8s.io/client-go/informers/{extensions/v1beta1/podsecuritypolicy.go => flowcontrol/v1beta3/flowschema.go} (51%) create mode 100644 vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/interface.go create mode 100644 vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/prioritylevelconfiguration.go create mode 100644 vendor/k8s.io/client-go/informers/networking/v1alpha1/ipaddress.go rename vendor/{github.com/tektoncd/pipeline/pkg/client/resource/informers/externalversions => k8s.io/client-go/informers}/resource/interface.go (68%) create mode 100644 vendor/k8s.io/client-go/informers/resource/v1alpha2/interface.go create mode 100644 vendor/k8s.io/client-go/informers/resource/v1alpha2/podschedulingcontext.go create mode 100644 vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclaim.go create mode 100644 vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclaimtemplate.go create mode 100644 vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go rename vendor/{github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned/typed/resource => k8s.io/client-go/kubernetes/typed/admissionregistration}/v1alpha1/doc.go (95%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/authentication_client.go rename vendor/{github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned/scheme => k8s.io/client-go/kubernetes/typed/authentication/v1alpha1}/doc.go (82%) rename vendor/{github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned/typed/resource => k8s.io/client-go/kubernetes/typed/authentication}/v1alpha1/generated_expansion.go (88%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/selfsubjectreview.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go rename vendor/{github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned/typed/resource/v1alpha1/resource_client.go => k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/certificates_client.go} (61%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/doc.go rename vendor/{github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned/doc.go => k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/generated_expansion.go} (83%) delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowcontrol_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowschema.go create mode 
100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/prioritylevelconfiguration.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/podschedulingcontext.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resource_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaim.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimtemplate.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclass.go rename vendor/{github.com/tektoncd/pipeline/pkg/client/resource/listers/resource => k8s.io/client-go/listers/admissionregistration}/v1alpha1/expansion_generated.go (61%) create mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicy.go create mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go create mode 100644 vendor/k8s.io/client-go/listers/certificates/v1alpha1/clustertrustbundle.go create mode 100644 vendor/k8s.io/client-go/listers/certificates/v1alpha1/expansion_generated.go delete mode 100644 vendor/k8s.io/client-go/listers/extensions/v1beta1/podsecuritypolicy.go rename vendor/{github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/pipeline_resource_defaults.go => k8s.io/client-go/listers/flowcontrol/v1beta3/expansion_generated.go} (56%) create mode 100644 vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/flowschema.go create mode 100644 vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/prioritylevelconfiguration.go create mode 100644 vendor/k8s.io/client-go/listers/networking/v1alpha1/ipaddress.go create mode 100644 vendor/k8s.io/client-go/listers/resource/v1alpha2/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/resource/v1alpha2/podschedulingcontext.go create mode 100644 vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclaim.go create mode 100644 vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclaimtemplate.go create mode 100644 vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclass.go create mode 100644 vendor/k8s.io/client-go/openapi/OWNERS create mode 100644 vendor/k8s.io/client-go/tools/cache/synctrack/lazy.go create mode 100644 vendor/k8s.io/client-go/tools/cache/synctrack/synctrack.go create mode 100644 vendor/k8s.io/client-go/transport/cache_go118.go create mode 100644 vendor/k8s.io/klog/v2/format.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/cached/cache.go delete mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/handler/handler_cache.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/serialization.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/schemaconv/openapi.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/schemaconv/proto_models.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go delete mode 100644 vendor/k8s.io/kube-openapi/pkg/spec3/security_requirement.go create mode 100644 vendor/k8s.io/utils/net/ipfamily.go create mode 100644 vendor/k8s.io/utils/pointer/OWNERS create mode 100644 
vendor/k8s.io/utils/pointer/README.md create mode 100644 vendor/k8s.io/utils/pointer/pointer.go delete mode 100644 vendor/knative.dev/pkg/configmap/OWNERS delete mode 100644 vendor/knative.dev/pkg/hash/OWNERS delete mode 100644 vendor/knative.dev/pkg/injection/OWNERS create mode 100644 vendor/knative.dev/pkg/injection/health_check.go delete mode 100644 vendor/knative.dev/pkg/kmeta/OWNERS delete mode 100644 vendor/knative.dev/pkg/logging/OWNERS delete mode 100644 vendor/knative.dev/pkg/metrics/OWNERS delete mode 100644 vendor/knative.dev/pkg/network/OWNERS create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go diff --git a/go.mod b/go.mod index 119c2af5..c6875a1b 100644 --- a/go.mod +++ b/go.mod @@ -3,197 +3,203 @@ module github.com/vdemeester/buildkit-tekton go 1.18 require ( - github.com/docker/cli v23.0.1+incompatible - github.com/docker/distribution v2.8.1+incompatible + github.com/docker/cli v24.0.0+incompatible + github.com/docker/distribution v2.8.2+incompatible github.com/google/go-cmp v0.5.9 github.com/moby/buildkit v0.11.5 github.com/moby/term v0.0.0-20221205130635-1aeaba878587 github.com/pkg/errors v0.9.1 - github.com/sirupsen/logrus v1.9.0 - github.com/spf13/cobra v1.6.1 - github.com/tektoncd/pipeline v0.45.0 + github.com/sirupsen/logrus v1.9.1 + github.com/spf13/cobra v1.7.0 + github.com/tektoncd/pipeline v0.50.0 github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea - golang.org/x/sync v0.1.0 - k8s.io/api v0.25.4 - k8s.io/apimachinery v0.25.4 - k8s.io/client-go v0.25.4 + golang.org/x/sync v0.3.0 + k8s.io/api v0.27.1 + k8s.io/apimachinery v0.27.1 + k8s.io/client-go v0.27.1 ) require ( - cloud.google.com/go/compute v1.14.0 // indirect + cloud.google.com/go/compute v1.19.3 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v0.8.0 // indirect - cloud.google.com/go/kms v1.8.0 // indirect + cloud.google.com/go/iam v1.1.0 // indirect + cloud.google.com/go/kms v1.12.1 // indirect contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d // indirect contrib.go.opencensus.io/exporter/prometheus v0.4.0 // indirect - github.com/Azure/azure-sdk-for-go v67.3.0+incompatible // indirect + github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v0.12.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v0.8.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect - github.com/Azure/go-autorest/autorest v0.11.28 // indirect - github.com/Azure/go-autorest/autorest/adal v0.9.21 // indirect - github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 // indirect + github.com/Azure/go-autorest/autorest v0.11.29 // indirect + github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect + github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 // indirect github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 // indirect github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect - github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect - github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect 
github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/Microsoft/go-winio v0.6.0 // indirect - github.com/armon/go-metrics v0.4.1 // indirect - github.com/armon/go-radix v1.0.0 // indirect - github.com/aws/aws-sdk-go-v2 v1.17.3 // indirect - github.com/aws/aws-sdk-go-v2/config v1.18.8 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.13.8 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.21 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.27 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.21 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.3.28 // indirect - github.com/aws/aws-sdk-go-v2/service/ecr v1.17.18 // indirect - github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.13.17 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.21 // indirect - github.com/aws/aws-sdk-go-v2/service/kms v1.20.0 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.12.0 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.0 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.18.0 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/aws/aws-sdk-go-v2 v1.18.1 // indirect + github.com/aws/aws-sdk-go-v2/config v1.18.27 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.13.26 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.4 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.34 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.28 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.3.35 // indirect + github.com/aws/aws-sdk-go-v2/service/ecr v1.18.11 // indirect + github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.16.2 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.28 // indirect + github.com/aws/aws-sdk-go-v2/service/kms v1.22.2 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.12.12 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.12 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.19.2 // indirect github.com/aws/smithy-go v1.13.5 // indirect - github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20221004211355-a250ad2ca1e3 // indirect + github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20230510185313-f5e39e5f34c7 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blendle/zapdriver v1.3.1 // indirect github.com/cenkalti/backoff/v3 v3.2.2 // indirect - github.com/census-instrumentation/opencensus-proto v0.3.0 // indirect - github.com/cespare/xxhash/v2 v2.1.2 // indirect - github.com/chrismellard/docker-credential-acr-env v0.0.0-20221002210726-e883f69e0206 // indirect - github.com/cloudevents/sdk-go/v2 v2.13.0 // indirect + github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 // indirect + github.com/cloudevents/sdk-go/v2 v2.14.0 // indirect github.com/containerd/console v1.0.3 // indirect - github.com/containerd/containerd v1.6.18 // indirect + github.com/containerd/containerd v1.6.19 // indirect github.com/containerd/continuity v0.3.0 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect github.com/containerd/typeurl v1.0.2 // indirect github.com/davecgh/go-spew 
v1.1.1 // indirect github.com/dimchansky/utfbom v1.1.1 // indirect - github.com/docker/docker v23.0.1+incompatible // indirect + github.com/docker/docker v24.0.0+incompatible // indirect github.com/docker/docker-credential-helpers v0.7.0 // indirect - github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/emicklei/go-restful/v3 v3.10.2 // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect - github.com/fatih/color v1.13.0 // indirect + github.com/go-jose/go-jose/v3 v3.0.0 // indirect github.com/go-kit/log v0.2.0 // indirect github.com/go-logfmt/logfmt v0.5.1 // indirect - github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/logr v1.2.4 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.20.0 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.22.3 // indirect github.com/gofrs/flock v0.8.1 // indirect github.com/gogo/googleapis v1.4.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-jwt/jwt/v4 v4.4.2 // indirect + github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/golang/snappy v0.0.4 // indirect + github.com/golang/protobuf v1.5.3 // indirect github.com/google/gnostic v0.6.9 // indirect - github.com/google/go-containerregistry v0.13.0 // indirect - github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20221030203717-1711cefd7eec // indirect - github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20221017135236-9b4fdd506cdd // indirect + github.com/google/go-containerregistry v0.15.2 // indirect + github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20230625233257-b8504803389b // indirect + github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20230516205744-dbecb1de8cfa // indirect github.com/google/gofuzz v1.2.0 // indirect + github.com/google/s2a-go v0.1.4 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.3.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.2.1 // indirect - github.com/googleapis/gax-go/v2 v2.7.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.4 // indirect + github.com/googleapis/gax-go/v2 v2.11.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-hclog v1.3.1 // indirect - github.com/hashicorp/go-immutable-radix v1.3.1 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-plugin v1.4.6 // indirect - github.com/hashicorp/go-retryablehttp v0.7.1 // indirect + github.com/hashicorp/go-retryablehttp v0.7.2 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect - github.com/hashicorp/go-secure-stdlib/mlock v0.1.2 // indirect github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 // indirect github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect github.com/hashicorp/go-sockaddr v1.0.2 // indirect - github.com/hashicorp/go-uuid v1.0.3 // indirect - 
github.com/hashicorp/go-version v1.6.0 // indirect github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/hashicorp/hcl v1.0.0 // indirect - github.com/hashicorp/vault/api v1.8.2 // indirect - github.com/hashicorp/vault/sdk v0.6.1 // indirect - github.com/hashicorp/yamux v0.1.1 // indirect + github.com/hashicorp/vault/api v1.9.2 // indirect github.com/imdario/mergo v0.3.13 // indirect - github.com/inconshreveable/mousetrap v1.0.1 // indirect - github.com/jellydator/ttlcache/v2 v2.11.1 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jellydator/ttlcache/v3 v3.0.1 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kelseyhightower/envconfig v1.4.0 // indirect - github.com/klauspost/compress v1.15.12 // indirect + github.com/klauspost/compress v1.16.5 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.16 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect - github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/mitchellh/go-testing-interface v1.14.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect - github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/moby/patternmatcher v0.5.0 // indirect github.com/moby/sys/signal v0.7.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/morikuni/aec v1.0.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/oklog/run v1.1.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.0-rc2 // indirect - github.com/pierrec/lz4 v2.6.1+incompatible // indirect + github.com/opencontainers/image-spec v1.1.0-rc4 // indirect + github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/prometheus/client_golang v1.14.0 // indirect github.com/prometheus/client_model v0.3.0 // indirect github.com/prometheus/common v0.37.0 // indirect github.com/prometheus/procfs v0.8.0 // indirect github.com/prometheus/statsd_exporter v0.21.0 // indirect github.com/ryanuber/go-glob v1.0.0 // indirect - github.com/sigstore/sigstore v1.5.1 // indirect + github.com/sigstore/sigstore v1.7.1 // indirect + github.com/sigstore/sigstore/pkg/signature/kms/aws v1.7.1 // indirect + github.com/sigstore/sigstore/pkg/signature/kms/azure v1.7.1 // indirect + github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.7.1 // indirect + github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.7.1 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/theupdateframework/go-tuf v0.5.2-0.20220930112810-3890c1e7ace4 // indirect + github.com/spiffe/go-spiffe/v2 v2.1.5 // indirect + github.com/spiffe/spire-api-sdk v1.7.0 // indirect + github.com/theupdateframework/go-tuf v0.5.2 // indirect github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect github.com/tonistiigi/fsutil v0.0.0-20230105215944-fb433841cbfa // indirect github.com/tonistiigi/vt100 v0.0.0-20210615222946-8066bb97264f // indirect + github.com/vbatts/tar-split v0.11.3 // indirect + github.com/zeebo/errs v1.3.0 // 
indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.29.0 // indirect - go.opentelemetry.io/otel v1.13.0 // indirect + go.opentelemetry.io/otel v1.16.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1 // indirect - go.opentelemetry.io/otel/sdk v1.13.0 // indirect - go.opentelemetry.io/otel/trace v1.13.0 // indirect + go.opentelemetry.io/otel/metric v1.16.0 // indirect + go.opentelemetry.io/otel/sdk v1.16.0 // indirect + go.opentelemetry.io/otel/trace v1.16.0 // indirect go.opentelemetry.io/proto/otlp v0.12.0 // indirect go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.8.0 // indirect go.uber.org/zap v1.24.0 // indirect - golang.org/x/crypto v0.6.0 // indirect - golang.org/x/mod v0.7.0 // indirect - golang.org/x/net v0.6.0 // indirect - golang.org/x/oauth2 v0.4.0 // indirect - golang.org/x/sys v0.5.0 // indirect - golang.org/x/term v0.5.0 // indirect - golang.org/x/text v0.7.0 // indirect - golang.org/x/time v0.2.0 // indirect - golang.org/x/tools v0.5.0 // indirect + golang.org/x/crypto v0.10.0 // indirect + golang.org/x/exp v0.0.0-20230307190834-24139beb5833 // indirect + golang.org/x/mod v0.10.0 // indirect + golang.org/x/net v0.11.0 // indirect + golang.org/x/oauth2 v0.9.0 // indirect + golang.org/x/sys v0.9.0 // indirect + golang.org/x/term v0.9.0 // indirect + golang.org/x/text v0.10.0 // indirect + golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.8.0 // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect - google.golang.org/api v0.107.0 // indirect + google.golang.org/api v0.128.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230109162033-3c3c17ce83e6 // indirect - google.golang.org/grpc v1.51.0 // indirect - google.golang.org/protobuf v1.28.1 // indirect + google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc // indirect + google.golang.org/grpc v1.56.2 // indirect + google.golang.org/protobuf v1.30.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/square/go-jose.v2 v2.6.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/klog/v2 v2.80.1 // indirect - k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect - k8s.io/utils v0.0.0-20221012122500-cfd413dd9e85 // indirect - knative.dev/pkg v0.0.0-20221123011842-b78020c16606 // indirect - sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect + k8s.io/klog/v2 v2.100.1 // indirect + k8s.io/kube-openapi v0.0.0-20230515203736-54b630e78af5 // indirect + k8s.io/utils v0.0.0-20230505201702-9f6742963106 // indirect + knative.dev/pkg v0.0.0-20230418073056-dfad48eaa5d0 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect sigs.k8s.io/yaml v1.3.0 // indirect ) diff --git a/go.sum b/go.sum index 17682bca..dcaf4a6b 100644 --- a/go.sum +++ b/go.sum @@ -13,24 +13,23 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.107.0 
h1:qkj22L7bgkl6vIeZDlOY2po43Mx/TIa2Wsa7VR+PEww= +cloud.google.com/go v0.110.2 h1:sdFPBr6xG9/wkBbfhmUz/JmZC7X6LavQgcrVINrKiVA= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.14.0 h1:hfm2+FfxVmnRlh6LpB7cg1ZNU+5edAHmW679JePztk0= -cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= +cloud.google.com/go/compute v1.19.3 h1:DcTwsFgGev/wV5+q8o2fzgcHOaac+DKGC91ZlvpsQds= +cloud.google.com/go/compute v1.19.3/go.mod h1:qxvISKp/gYnXkSAD1ppcSOveRAmzxicEv/JlizULFrI= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/iam v0.8.0 h1:E2osAkZzxI/+8pZcxVLcDtAQx/u+hZXVryUaYQ5O0Kk= -cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= -cloud.google.com/go/kms v1.8.0 h1:VrJLOsMRzW7IqTTYn+OYupqF3iKSE060Nrn+PECrYjg= -cloud.google.com/go/kms v1.8.0/go.mod h1:4xFEhYFqvW+4VMELtZyxomGSYtSQKzM178ylFW4jMAg= -cloud.google.com/go/longrunning v0.3.0 h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs= +cloud.google.com/go/iam v1.1.0 h1:67gSqaPukx7O8WLLHMa0PNs3EBGd2eE4d+psbO/CO94= +cloud.google.com/go/iam v1.1.0/go.mod h1:nxdHjaKfCr7fNYx/HJMM8LgiMugmveWlkatear5gVyk= +cloud.google.com/go/kms v1.12.1 h1:xZmZuwy2cwzsocmKDOPu4BL7umg8QXagQx6fKVmf45U= +cloud.google.com/go/kms v1.12.1/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -45,20 +44,31 @@ contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d/g contrib.go.opencensus.io/exporter/prometheus v0.4.0 h1:0QfIkj9z/iVZgK31D9H9ohjjIDApI2GOPScCKwxedbs= contrib.go.opencensus.io/exporter/prometheus v0.4.0/go.mod h1:o7cosnyfuPVK0tB8q0QmaQNhGnptITnPQB+z1+qeFB0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-sdk-for-go v67.3.0+incompatible h1:QEvenaO+Y9ShPeCWsSAtolzVUcb0T0tPeek5TDsovuM= -github.com/Azure/azure-sdk-for-go v67.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= +github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.1 h1:SEy2xmstIphdPwNBUi7uhvjyjhVKISfwjfOJmuy7kg4= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.1/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 
h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v0.12.0 h1:4Kynh6Hn2ekyIsBgNQJb3dn1+/MyvzfUJebti2emB/A= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v0.12.0/go.mod h1:Q28U+75mpCaSCDowNEmhIo/rmgdkqmkmzI7N6TGR4UY= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v0.8.0 h1:T028gtTPiYt/RMUfs8nVsAL7FDQrfLlrm/NnRG/zcC4= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v0.8.0/go.mod h1:cw4zVQgBby0Z5f2v0itn6se2dDP17nTjbZFXW5uPyHA= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc= -github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM= -github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA= +github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= +github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs= github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= -github.com/Azure/go-autorest/autorest/adal v0.9.21 h1:jjQnVFXPfekaqb8vIsv2G1lxshoW+oGv4MDlhRtnYZk= -github.com/Azure/go-autorest/autorest/adal v0.9.21/go.mod h1:zua7mBUaCc5YnSLKYgGJR/w5ePdMDA6H56upLsHzA9U= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 h1:P6bYXFoao05z5uhOQzbC3Qd8JqF3jUoocoTeIxkp2cA= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.11/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg= +github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk= +github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= +github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 h1:wkAZRgT/pn8HhFyzfe9UnqOjJYqlembgCTi72Bm/xKk= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.12/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg= github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg= github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 h1:w77/uPk80ZET2F+AfQExZyEWtn+0Rk/uw17m9fv5Ajc= github.com/Azure/go-autorest/autorest/azure/cli v0.4.6/go.mod h1:piCfgPho7BiIDdEQ1+g4VmKyD5y+p/XtSNqE6Hc4QD0= @@ -67,20 +77,18 @@ github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSY github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw= github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod 
h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= -github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= -github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= -github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= -github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY= +github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg= -github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE= -github.com/Microsoft/hcsshim v0.9.6 h1:VwnDOgLeoi2du6dAznfmspNqTiwczvjv4K7NxuY9jsY= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Microsoft/hcsshim v0.9.7 h1:mKNHW/Xvv1aFH87Jb6ERDzXTJTLPlmzfZ28VBFD/bfg= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -88,56 +96,51 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= -github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= -github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aws/aws-sdk-go v1.44.180 h1:VLZuAHI9fa/3WME5JjpVjcPCNfpGHVMiHx8sLHWhMgI= -github.com/aws/aws-sdk-go-v2 v1.16.16/go.mod h1:SwiyXi/1zTUZ6KIAmLK5V5ll8SiURNUYOqTerZPaF9k= -github.com/aws/aws-sdk-go-v2 v1.17.3 h1:shN7NlnVzvDUgPQ+1rLMSxY8OWRNDRYtiqe0p/PgrhY= -github.com/aws/aws-sdk-go-v2 v1.17.3/go.mod 
h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= -github.com/aws/aws-sdk-go-v2/config v1.17.8/go.mod h1:UkCI3kb0sCdvtjiXYiU4Zx5h07BOpgBTtkPu/49r+kA= -github.com/aws/aws-sdk-go-v2/config v1.18.8 h1:lDpy0WM8AHsywOnVrOHaSMfpaiV2igOw8D7svkFkXVA= -github.com/aws/aws-sdk-go-v2/config v1.18.8/go.mod h1:5XCmmyutmzzgkpk/6NYTjeWb6lgo9N170m1j6pQkIBs= -github.com/aws/aws-sdk-go-v2/credentials v1.12.21/go.mod h1:O+4XyAt4e+oBAoIwNUYkRg3CVMscaIJdmZBOcPgJ8D8= -github.com/aws/aws-sdk-go-v2/credentials v1.13.8 h1:vTrwTvv5qAwjWIGhZDSBH/oQHuIQjGmD232k01FUh6A= -github.com/aws/aws-sdk-go-v2/credentials v1.13.8/go.mod h1:lVa4OHbvgjVot4gmh1uouF1ubgexSCN92P6CJQpT0t8= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.17/go.mod h1:yIkQcCDYNsZfXpd5UX2Cy+sWA1jPgIhGTw9cOBzfVnQ= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.21 h1:j9wi1kQ8b+e0FBVHxCqCGo4kxDU175hoDHcWAi0sauU= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.21/go.mod h1:ugwW57Z5Z48bpvUyZuaPy4Kv+vEfJWnIrky7RmkBvJg= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.23/go.mod h1:2DFxAQ9pfIRy0imBCJv+vZ2X6RKxves6fbnEuSry6b4= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.27 h1:I3cakv2Uy1vNmmhRQmFptYDxOvBnwCdNwyw63N0RaRU= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.27/go.mod h1:a1/UpzeyBBerajpnP5nGZa9mGzsBn5cOKxm6NWQsvoI= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.17/go.mod h1:pRwaTYCJemADaqCbUAxltMoHKata7hmB5PjEXeu0kfg= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.21 h1:5NbbMrIzmUn/TXFqAle6mgrH5m9cOvMLRGL7pnG8tRE= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.21/go.mod h1:+Gxn8jYn5k9ebfHEqlhrMirFjSW0v0C9fI+KN5vk2kE= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.24/go.mod h1:jULHjqqjDlbyTa7pfM7WICATnOv+iOhjletM3N0Xbu8= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.28 h1:KeTxcGdNnQudb46oOl4d90f2I33DF/c6q3RnZAmvQdQ= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.28/go.mod h1:yRZVr/iT0AqyHeep00SZ4YfBAKojXz08w3XMBscdi0c= -github.com/aws/aws-sdk-go-v2/service/ecr v1.17.18 h1:uiF/RI+Up8H2xdgT2GWa20YzxiKEalHieqNjm6HC3Xk= -github.com/aws/aws-sdk-go-v2/service/ecr v1.17.18/go.mod h1:DQtDYmexqR+z+B6HBCvY7zK/tuXKv6Zy/IwOXOK3eow= -github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.13.17 h1:bcQy5/dcJO8VQD+p0tDoIYdgEC3ch9f1/BNRES7XMug= -github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.13.17/go.mod h1:r1Vuka0kyzqN0sZm4lYTXf0Vhl+o/mTLq6vKpBBZYaQ= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.17/go.mod h1:4nYOrY41Lrbk2170/BGkcJKBhws9Pfn8MG3aGqjjeFI= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.21 h1:5C6XgTViSb0bunmU57b3CT+MhxULqHH2721FVA+/kDM= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.21/go.mod h1:lRToEJsn+DRA9lW4O9L9+/3hjTkUzlzyzHqn8MTds5k= -github.com/aws/aws-sdk-go-v2/service/kms v1.20.0 h1:1mEQ1BVRfxU2KzcUUIzqDQ8p6yPkhzHrHT++sjtLJts= -github.com/aws/aws-sdk-go-v2/service/kms v1.20.0/go.mod h1:13sjgMH7Xu4e46+0BEDhSnNh+cImHSYS5PpBjV3oXcU= -github.com/aws/aws-sdk-go-v2/service/sso v1.11.23/go.mod h1:/w0eg9IhFGjGyyncHIQrXtU8wvNsTJOP0R6PPj0wf80= -github.com/aws/aws-sdk-go-v2/service/sso v1.12.0 h1:/2gzjhQowRLarkkBOGPXSRnb8sQ2RVsjdG1C/UliK/c= -github.com/aws/aws-sdk-go-v2/service/sso v1.12.0/go.mod h1:wo/B7uUm/7zw/dWhBJ4FXuw1sySU5lyIhVg1Bu2yL9A= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.6/go.mod h1:csZuQY65DAdFBt1oIjO5hhBR49kQqop4+lcuCjf2arA= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.0 h1:Jfly6mRxk2ZOSlbCvZfKNS7TukSx1mIzhSsqZ/IGSZI= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.0/go.mod 
h1:TZSH7xLO7+phDtViY/KUp9WGCJMQkLJ/VpgkTFd5gh8= -github.com/aws/aws-sdk-go-v2/service/sts v1.16.19/go.mod h1:h4J3oPZQbxLhzGnk+j9dfYHi5qIOVJ5kczZd658/ydM= -github.com/aws/aws-sdk-go-v2/service/sts v1.18.0 h1:kOO++CYo50RcTFISESluhWEi5Prhg+gaSs4whWabiZU= -github.com/aws/aws-sdk-go-v2/service/sts v1.18.0/go.mod h1:+lGbb3+1ugwKrNTWcf2RT05Xmp543B06zDFTwiTLp7I= -github.com/aws/smithy-go v1.13.3/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/aws/aws-sdk-go v1.44.288 h1:Ln7fIao/nl0ACtelgR1I4AiEw/GLNkKcXfCaHupUW5Q= +github.com/aws/aws-sdk-go-v2 v1.18.0/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= +github.com/aws/aws-sdk-go-v2 v1.18.1 h1:+tefE750oAb7ZQGzla6bLkOwfcQCEtC5y2RqoqCeqKo= +github.com/aws/aws-sdk-go-v2 v1.18.1/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= +github.com/aws/aws-sdk-go-v2/config v1.18.25/go.mod h1:dZnYpD5wTW/dQF0rRNLVypB396zWCcPiBIvdvSWHEg4= +github.com/aws/aws-sdk-go-v2/config v1.18.27 h1:Az9uLwmssTE6OGTpsFqOnaGpLnKDqNYOJzWuC6UAYzA= +github.com/aws/aws-sdk-go-v2/config v1.18.27/go.mod h1:0My+YgmkGxeqjXZb5BYme5pc4drjTnM+x1GJ3zv42Nw= +github.com/aws/aws-sdk-go-v2/credentials v1.13.24/go.mod h1:jYPYi99wUOPIFi0rhiOvXeSEReVOzBqFNOX5bXYoG2o= +github.com/aws/aws-sdk-go-v2/credentials v1.13.26 h1:qmU+yhKmOCyujmuPY7tf5MxR/RKyZrOPO3V4DobiTUk= +github.com/aws/aws-sdk-go-v2/credentials v1.13.26/go.mod h1:GoXt2YC8jHUBbA4jr+W3JiemnIbkXOfxSXcisUsZ3os= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.3/go.mod h1:4Q0UFP0YJf0NrsEuEYHpM9fTSEVnD16Z3uyEF7J9JGM= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.4 h1:LxK/bitrAr4lnh9LnIS6i7zWbCOdMsfzKFBI6LUCS0I= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.4/go.mod h1:E1hLXN/BL2e6YizK1zFlYd8vsfi2GTjbjBazinMmeaM= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.33/go.mod h1:7i0PF1ME/2eUPFcjkVIwq+DOygHEoK92t5cDqNgYbIw= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.34 h1:A5UqQEmPaCFpedKouS4v+dHCTUo2sKqhoKO9U5kxyWo= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.34/go.mod h1:wZpTEecJe0Btj3IYnDx/VlUzor9wm3fJHyvLpQF0VwY= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.27/go.mod h1:UrHnn3QV/d0pBZ6QBAEQcqFLf8FAzLmoUfPVIueOvoM= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.28 h1:srIVS45eQuewqz6fKKu6ZGXaq6FuFg5NzgQBAM6g8Y4= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.28/go.mod h1:7VRpKQQedkfIEXb4k52I7swUnZP0wohVajJMRn3vsUw= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.34/go.mod h1:Etz2dj6UHYuw+Xw830KfzCfWGMzqvUTCjUj5b76GVDc= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.35 h1:LWA+3kDM8ly001vJ1X1waCuLJdtTl48gwkPKWy9sosI= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.35/go.mod h1:0Eg1YjxE0Bhn56lx+SHJwCzhW+2JGtizsrx+lCqrfm0= +github.com/aws/aws-sdk-go-v2/service/ecr v1.18.11 h1:wlTgmb/sCmVRJrN5De3CiHj4v/bTCgL5+qpdEd0CPtw= +github.com/aws/aws-sdk-go-v2/service/ecr v1.18.11/go.mod h1:Ce1q2jlNm8BVpjLaOnwnm5v2RClAbK6txwPljFzyW6c= +github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.16.2 h1:yflJrGmi1pXtP9lOpOeaNZyc0vXnJTuP2sor3nJcGGo= +github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.16.2/go.mod h1:uHtRE7aqXNmpeYL+7Ec7LacH5zC9+w2T5MBOeEKDdu0= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.27/go.mod h1:EOwBD4J4S5qYszS5/3DpkejfuK+Z5/1uzICfPaZLtqw= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.28 h1:bkRyG4a929RCnpVSTvLM2j/T4ls015ZhhYApbmYs15s= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.28/go.mod h1:jj7znCIg05jXlaGBlFMGP8+7UN3VtCkRBG2spnmRQkU= +github.com/aws/aws-sdk-go-v2/service/kms 
v1.22.2 h1:jwmtdM1/l1DRNy5jQrrYpsQm8zwetkgeqhAqefDr1yI= +github.com/aws/aws-sdk-go-v2/service/kms v1.22.2/go.mod h1:aNfh11Smy55o65PB3MyKbkM8BFyFUcZmj1k+4g8eNfg= +github.com/aws/aws-sdk-go-v2/service/sso v1.12.10/go.mod h1:ouy2P4z6sJN70fR3ka3wD3Ro3KezSxU6eKGQI2+2fjI= +github.com/aws/aws-sdk-go-v2/service/sso v1.12.12 h1:nneMBM2p79PGWBQovYO/6Xnc2ryRMw3InnDJq1FHkSY= +github.com/aws/aws-sdk-go-v2/service/sso v1.12.12/go.mod h1:HuCOxYsF21eKrerARYO6HapNeh9GBNq7fius2AcwodY= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.10/go.mod h1:AFvkxc8xfBe8XA+5St5XIHHrQQtkxqrRincx4hmMHOk= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.12 h1:2qTR7IFk7/0IN/adSFhYu9Xthr0zVFTgBrmPldILn80= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.12/go.mod h1:E4VrHCPzmVB/KFXtqBGKb3c8zpbNBgKe3fisDNLAW5w= +github.com/aws/aws-sdk-go-v2/service/sts v1.19.0/go.mod h1:BgQOMsg8av8jset59jelyPW7NoZcZXLVpDsXunGDrk8= +github.com/aws/aws-sdk-go-v2/service/sts v1.19.2 h1:XFJ2Z6sNUUcAz9poj+245DMkrHE4h2j5I9/xD50RHfE= +github.com/aws/aws-sdk-go-v2/service/sts v1.19.2/go.mod h1:dp0yLPsLBOi++WTxzCjA/oZqi6NPIhoR+uF7GeMU9eg= github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8= github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= -github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20221004211355-a250ad2ca1e3 h1:Ted/bR1N6ltMrASdwRhX1BrGYSFg3aeGMlK8GlgkGh4= -github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20221004211355-a250ad2ca1e3/go.mod h1:m06KtrZgOloUaePAQMv+Ha8kRmTnKdozTHZrweepIrw= +github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20230510185313-f5e39e5f34c7 h1:G5IT+PEpFY0CDb3oITDP9tkmLrHkVD8Ny+elUmBqVYI= +github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20230510185313-f5e39e5f34c7/go.mod h1:VVALgT1UESBh91dY0GprHnT1Z7mKd96VDk8qVy+bmu0= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -151,44 +154,46 @@ github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTx github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk= -github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chrismellard/docker-credential-acr-env v0.0.0-20221002210726-e883f69e0206 h1:lG6Usi/kX/JBZzGz1H+nV+KwM97vThQeKunCbS6PutU= -github.com/chrismellard/docker-credential-acr-env 
v0.0.0-20221002210726-e883f69e0206/go.mod h1:1UmFRnmMnVsHwD+ZntmLkoVBB1ZLa6V+XXEbF6hZCxU= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 h1:krfRl01rzPzxSxyLyrChD+U+MzsBXbm0OwYYB67uF+4= +github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589/go.mod h1:OuDyvmLnMCwa2ep4Jkm6nyA0ocJuZlGyk2gGseVzERM= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= -github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudevents/sdk-go/v2 v2.13.0 h1:2zxDS8RyY1/wVPULGGbdgniGXSzLaRJVl136fLXGsYw= -github.com/cloudevents/sdk-go/v2 v2.13.0/go.mod h1:xDmKfzNjM8gBvjaF8ijFjM1VYOVUEeUfapHMUX1T5To= +github.com/cloudevents/sdk-go/v2 v2.14.0 h1:Nrob4FwVgi5L4tV9lhjzZcjYqFVyJzsA56CwPaPfv6s= +github.com/cloudevents/sdk-go/v2 v2.14.0/go.mod h1:xDmKfzNjM8gBvjaF8ijFjM1VYOVUEeUfapHMUX1T5To= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA= github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw= github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= -github.com/containerd/containerd v1.6.18 h1:qZbsLvmyu+Vlty0/Ex5xc0z2YtKpIsb5n45mAMI+2Ns= -github.com/containerd/containerd v1.6.18/go.mod h1:1RdCUu95+gc2v9t3IL+zIlpClSmew7/0YS8O5eQZrOw= +github.com/containerd/containerd v1.6.19 h1:F0qgQPrG0P2JPgwpxWxYavrVeXAG0ezUIB9Z/4FTUAU= +github.com/containerd/containerd v1.6.19/go.mod h1:HZCDMn4v/Xl2579/MvtOC2M206i+JJ6VxFWU/NetrGY= github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM= github.com/containerd/fifo v1.0.0 h1:6PirWBr9/L7GDamKr+XM0IeUFXu5mf3M/BPpH9gaLBU= github.com/containerd/nydus-snapshotter v0.3.1 
h1:b8WahTrPkt3XsabjG2o/leN4fw3HWZYr+qxo/Z8Mfzk= -github.com/containerd/stargz-snapshotter v0.13.0 h1:3zr1/IkW1aEo6cMYTQeZ4L2jSuCN+F4kgGfjnuowe4U= -github.com/containerd/stargz-snapshotter/estargz v0.13.0 h1:fD7AwuVV+B40p0d9qVkH/Au1qhp8hn/HWJHIYjpEcfw= +github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k= +github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o= github.com/containerd/ttrpc v1.1.0 h1:GbtyLRxb0gOLR0TYQWt3O6B0NvT8tMdorEHqIQo/lWI= github.com/containerd/typeurl v1.0.2 h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY= github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -196,27 +201,30 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= -github.com/docker/cli v23.0.1+incompatible h1:LRyWITpGzl2C9e9uGxzisptnxAn1zfZKXy13Ul2Q5oM= -github.com/docker/cli v23.0.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= -github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v23.0.1+incompatible h1:vjgvJZxprTTE1A37nm+CLNAdwu6xZekyoiVlUZEINcY= -github.com/docker/docker v23.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= +github.com/docker/cli v24.0.0+incompatible h1:0+1VshNwBQzQAx9lOl+OYCTCEAD8fKs/qeXMx3O0wqM= +github.com/docker/cli v24.0.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= +github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v24.0.0+incompatible h1:z4bf8HvONXX9Tde5lGBMQ7yCJgNahmJumdrStZAbeY4= +github.com/docker/docker v24.0.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A= github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod 
h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJIZIqlJLqE= +github.com/emicklei/go-restful/v3 v3.10.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= @@ -228,13 +236,13 @@ github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52 h1:a4DFiKFJiDRGF github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= -github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo= +github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= @@ -247,19 +255,18 @@ github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNV github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= 
-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= -github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= @@ -270,9 +277,10 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs= -github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -300,10 +308,9 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= 
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= @@ -318,16 +325,17 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-containerregistry v0.13.0 h1:y1C7Z3e149OJbOPDBxLYR8ITPz8dTKqQwjErKVHJC8k= -github.com/google/go-containerregistry v0.13.0/go.mod h1:J9FQ+eSS4a1aC2GNZxvNpbWhgp0487v+cgiilB4FqDo= -github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20221030203717-1711cefd7eec h1:GtokKAIyg0S9Y/60CO+gHUBHwYblbwfaUS3Qr+AejTc= -github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20221030203717-1711cefd7eec/go.mod h1:7QLaBZxN+nMCx82XO5R7qPHq0m61liEg8yca68zymHo= -github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20221017135236-9b4fdd506cdd h1:+nq85YWt99EkBpsKV+ABoAzxM7My/uOKHModpV/mwgs= -github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20221017135236-9b4fdd506cdd/go.mod h1:k/wl/uGzWEl8kLqUOWSnKe9QL/10YKnuwHMNZHnXhfY= +github.com/google/go-containerregistry v0.15.2 h1:MMkSh+tjSdnmJZO7ljvEqV1DjfekB6VUEAZgy3a+TQE= +github.com/google/go-containerregistry v0.15.2/go.mod h1:wWK+LnOv4jXMM23IT/F1wdYftGWGr47Is8CG+pmHK1Q= +github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20230625233257-b8504803389b h1:ptt4Cmxx6HsJQUSRp0LRB8nAxMdn9mxnqhc4dxwYlSM= +github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20230625233257-b8504803389b/go.mod h1:Ek+8PQrShkA7aHEj3/zSW33wU0V/Bx3zW/gFh7l21xY= +github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20230516205744-dbecb1de8cfa h1:+MG+Q2Q7mtW6kCIbUPZ9ZMrj7xOWDKI1hhy1qp0ygI0= +github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20230516205744-dbecb1de8cfa/go.mod h1:KdL98/Va8Dy1irB6lTxIRIQ7bQj4lbrlvqUzKEQ+ZBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -340,48 +348,43 @@ github.com/google/pprof 
v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= +github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.2.1 h1:RY7tHKZcRlk788d5WSo/e83gOyyy742E8GSs771ySpg= -github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.2.4 h1:uGy6JWR/uMIILU8wbf+OkstIrNiMjGpEIyhx8f6W7s4= +github.com/googleapis/enterprise-certificate-proxy v0.2.4/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ= -github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4= +github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 h1:lLT7ZLSzGLI08vc9cpd+tYmNWjdKDqyr/2L+f6U12Fk= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod 
h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v1.3.1 h1:vDwF1DFNZhntP4DAjuTpOw3uEgMUpXh1pB5fW9DqHpo= github.com/hashicorp/go-hclog v1.3.1/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= -github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.4.6 h1:MDV3UrKQBM3du3G7MApDGvOsMYy3JQJ4exhSoKBAeVA= -github.com/hashicorp/go-plugin v1.4.6/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s= -github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-retryablehttp v0.7.1 h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ= -github.com/hashicorp/go-retryablehttp v0.7.1/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-retryablehttp v0.7.2 h1:AcYqCvkpalPnPF2pn0KamgwamS42TqUDDYFRKq/RAd0= +github.com/hashicorp/go-retryablehttp v0.7.2/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-secure-stdlib/mlock v0.1.2 h1:p4AKXPPS24tO8Wc8i1gLvSKdmkiSY5xuju57czJ/IJQ= -github.com/hashicorp/go-secure-stdlib/mlock v0.1.2/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= @@ -389,35 +392,25 @@ github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9 github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= -github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= -github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 
h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/vault/api v1.8.2 h1:C7OL9YtOtwQbTKI9ogB0A1wffRbCN+rH/LLCHO3d8HM= -github.com/hashicorp/vault/api v1.8.2/go.mod h1:ML8aYzBIhY5m1MD1B2Q0JV89cC85YVH4t5kBaZiyVaE= -github.com/hashicorp/vault/sdk v0.6.1 h1:sjZC1z4j5Rh2GXYbkxn5BLK05S1p7+MhW4AgdUmgRUA= -github.com/hashicorp/vault/sdk v0.6.1/go.mod h1:Ck4JuAC6usTphfrrRJCRH+7/N7O2ozZzkm/fzQFt4uM= -github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= -github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= +github.com/hashicorp/vault/api v1.9.2 h1:YjkZLJ7K3inKgMZ0wzCU9OHqc+UqMQyXsPXnf3Cl2as= +github.com/hashicorp/vault/api v1.9.2/go.mod h1:jo5Y/ET+hNyz+JnKDt8XLAdKs+AM0G5W0Vp1IrFI8N8= github.com/honeycombio/beeline-go v1.10.0 h1:cUDe555oqvw8oD76BQJ8alk7FP0JZ/M/zXpNvOEDLDc= github.com/honeycombio/libhoney-go v1.16.0 h1:kPpqoz6vbOzgp7jC6SR7SkNj7rua7rgxvznI6M3KdHc= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/in-toto/in-toto-golang v0.5.0 h1:hb8bgwr0M2hGdDsLjkJ3ZqJ8JFLL/tgYdAxF/XEFBbY= -github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= -github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/jellydator/ttlcache/v2 v2.11.1 h1:AZGME43Eh2Vv3giG6GeqeLeFXxwxn1/qHItqWZl6U64= -github.com/jellydator/ttlcache/v2 v2.11.1/go.mod h1:RtE5Snf0/57e+2cLWFYWCCsLas2Hy3c5Z4n14XmSvTI= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jellydator/ttlcache/v3 v3.0.1 h1:cHgCSMS7TdQcoprXnWUptJZzyFsqs18Lt8VVhRuZYVU= +github.com/jellydator/ttlcache/v3 v3.0.1/go.mod h1:WwTaEmcXQ3MTjOm4bsZoDFiCu/hMvNWLO1w67RXz6h4= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jhump/protoreflect v1.14.0 h1:MBbQK392K3u8NTLbKOCIi3XdI+y+c6yt5oMq0X3xviw= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -427,7 +420,6 @@ github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8Hm github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -440,21 +432,23 @@ github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dv github.com/kelseyhightower/envconfig v1.4.0/go.mod 
h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kElDUEM= -github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= +github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI= +github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf h1:ndns1qx/5dL43g16EQkPV/i8+b3l5bYQwLeoSBe7tS8= github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf/go.mod h1:aGkAgvWY/IUcVFfuly53REpfv5edu25oij+qHRFaraA= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= @@ -471,18 +465,12 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5 github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= -github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= -github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= github.com/mitchellh/go-wordwrap v1.0.0/go.mod 
h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= -github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/buildkit v0.11.5 h1:S6YrFJ0bfBT2w9e8kOxqsDV8Bw+HtfqdB6eHL17BXRI= github.com/moby/buildkit v0.11.5/go.mod h1:P5Qi041LvCfhkfYBHry+Rwoo3Wi6H971J2ggE+PcIoo= github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= @@ -506,26 +494,21 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= -github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo/v2 v2.1.6 h1:Fx2POJZfKRQcM1pH49qSZiYeu319wji004qX+GDovrU= -github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q= +github.com/onsi/ginkgo/v2 v2.9.1 h1:zie5Ly042PD3bsCvsSOPvRnFwyo3rKe64TJlD6nu0mk= +github.com/onsi/gomega v1.27.4 h1:Z2AnStgsdSayCMDiCU42qIz+HLqEPcgiOCXjAU/w+8E= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0-rc2 h1:2zx/Stx4Wc5pIPDvIxHXvXtQFW/7XWJGmnM7r3wg034= -github.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= +github.com/opencontainers/image-spec v1.1.0-rc4 h1:oOxKUJWnFC4YGHCCMNql1x4YaDfYBTS5Y4x/Cgeo1E0= +github.com/opencontainers/image-spec v1.1.0-rc4/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc= github.com/opencontainers/selinux v1.10.2 h1:NFy2xCsjn7+WspbfZkUd5zyVeisV7VFbPSP96+8/ha4= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/openzipkin/zipkin-go v0.3.0 h1:XtuXmOLIXLjiU2XduuWREDT0LOKtSgos/g7i7RYyoZQ= github.com/package-url/packageurl-go v0.1.1-0.20220428063043-89078438f170 h1:DiLBVp4DAcZlBVBEtJpNWZpZVq0AEeCY7Hqk8URVs4o= -github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= -github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= -github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= -github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= 
+github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -535,7 +518,6 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= @@ -548,7 +530,6 @@ github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= @@ -557,7 +538,6 @@ github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8 github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= @@ -567,31 +547,43 @@ github.com/prometheus/statsd_exporter v0.21.0 h1:hA05Q5RFeIjgwKIYEdFd59xu5Wwaznf github.com/prometheus/statsd_exporter v0.21.0/go.mod h1:rbT83sZq2V+p73lHhPZfMc3MLCHmSHelCh9hSGYNLTQ= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod 
h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE= +github.com/secure-systems-lab/go-securesystemslib v0.6.0 h1:T65atpAVCJQK14UA57LMdZGpHi4QYSH/9FZyNGqMYIA= github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI= -github.com/sigstore/sigstore v1.5.1 h1:iUou0QJW8eQKMUkTXbFyof9ZOblDtfaW2Sn2+QI8Tcs= -github.com/sigstore/sigstore v1.5.1/go.mod h1:3i6UTWVNtFwOtbgG63FZZNID4vO9KcO8AszIJlaNI8k= +github.com/sigstore/sigstore v1.7.1 h1:fCATemikcBK0cG4+NcM940MfoIgmioY1vC6E66hXxks= +github.com/sigstore/sigstore v1.7.1/go.mod h1:0PmMzfJP2Y9+lugD0wer4e7TihR5tM7NcIs3bQNk5xg= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.7.1 h1:rDHrG/63b3nBq3G9plg7iYnWN6lBhOfq/XultlCZgII= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.7.1/go.mod h1:hl0LRidnJG1uL1lLSHGEjcs+MxLjT65NJ7pX/TQDIsk= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.7.1 h1:X3ezwolP+b1jP3R6XPOWhUU0TZKONiv6EIRuySlZGrY= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.7.1/go.mod h1:SG2NPEdX2Vi7CBp/o93kJqXrovkis/T9ou9oxZONyEA= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.7.1 h1:mj1KhdzzP1me994bt1UXhq5KZGSR1SoqxTqcT+hfPMk= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.7.1/go.mod h1:Z7LFrKKfj5ZPhy0YS9HcI4H6kbUQzBsE3e3hR+R3YY8= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.7.1 h1:fhOToGY5fC5TY101an8i/oDYpoLzUJ1nUFwhnHA1+XY= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.7.1/go.mod h1:SN4QZHHDs2VqXh5bRXrIi8vqLbOijIp2XoSlmV/WJ/c= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sirupsen/logrus v1.9.1 h1:Ou41VVR3nMWWmTiEUnj0OlsgOSCUFgsPAOl6jRIcVtQ= +github.com/sirupsen/logrus v1.9.1/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spdx/tools-golang v0.3.1-0.20230104082527-d6f58551be3f h1:9B623Cfs+mclYK6dsae7gLSwuIBHvlgmEup87qpqsAQ= -github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= -github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spiffe/go-spiffe/v2 v2.1.5 h1:nFzp6pllCxpso6A2CaokdjhmH3uHWMNL9DGYXeZrShs= +github.com/spiffe/go-spiffe/v2 v2.1.5/go.mod h1:eVDqm9xFvyqao6C+eQensb9ZPkyNEeaUbqbBpOhBnNk= +github.com/spiffe/spire-api-sdk v1.7.0 h1:ThqXfS5dt29Noq8Sjr65CPtIk7Pe7LLoLr7S8g671Ic= +github.com/spiffe/spire-api-sdk v1.7.0/go.mod h1:4uuhFlN6KBWjACRP3xXwrOTNnvaLp1zJs8Lribtr4fI= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -602,12 +594,13 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/tektoncd/pipeline v0.45.0 h1:Hv9kyutu5GWGXKtcMrM7PXdAULgeQc0F2HWDNg+jo5c= -github.com/tektoncd/pipeline v0.45.0/go.mod h1:20Xs6qk3BTpsLHYWEtLNPM44XKqNH5jYwoomXHOGNs8= -github.com/theupdateframework/go-tuf v0.5.2-0.20220930112810-3890c1e7ace4 h1:1i/Afw3rmaR1gF3sfVkG2X6ldkikQwA9zY380LrR5YI= -github.com/theupdateframework/go-tuf v0.5.2-0.20220930112810-3890c1e7ace4/go.mod h1:vAqWV3zEs89byeFsAYoh/Q14vJTgJkHwnnRCWBBBINY= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/tektoncd/pipeline v0.50.0 h1:bH04XChFOYCwr7Gm6f2NiDkieHUiGohKCGsLe0fxYf0= +github.com/tektoncd/pipeline v0.50.0/go.mod h1:OjhCfhPQbVvK6GUmIseL2ipjaQ8ILcUerMk4P4sCcHA= +github.com/theupdateframework/go-tuf v0.5.2 h1:habfDzTmpbzBLIFGWa2ZpVhYvFBoK0C1onC3a4zuPRA= +github.com/theupdateframework/go-tuf v0.5.2/go.mod h1:SyMV5kg5n4uEclsyxXJZI2UxPFJNDc4Y+r7wv+MlvTA= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= github.com/tonistiigi/fsutil v0.0.0-20230105215944-fb433841cbfa h1:XOFp/3aBXlqmOFAg3r6e0qQjPnK5I970LilqX+Is1W8= @@ -616,9 +609,10 @@ github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea h1:SXhTLE6pb6eld/ github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea/go.mod h1:WPnis/6cRcDZSUvVmezrxJPkiO87ThFYsoUiMwWNDJk= github.com/tonistiigi/vt100 v0.0.0-20210615222946-8066bb97264f h1:DLpt6B5oaaS8jyXHa9VA4rrZloBVPVXeCtrOsrFauxc= github.com/tonistiigi/vt100 v0.0.0-20210615222946-8066bb97264f/go.mod h1:ulncasL3N9uLrVann0m+CDlJKWsIAP34MPcOJF6VRvc= -github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= -github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME= +github.com/vbatts/tar-split v0.11.3 h1:hLFqsOLQ1SsppQNTMpkpPXClLDfC2A3Zgy9OUU+RVck= +github.com/vbatts/tar-split v0.11.3/go.mod h1:9QlHN18E+fEH7RdG+QAJJcuya3rqT7eXSTY7wGrAokY= github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU= github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= 
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -628,6 +622,9 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zeebo/errs v1.3.0 h1:hmiaKqgYZzcVgRL1Vkc1Mn2914BbzB0IBxs+ebeutGs= +github.com/zeebo/errs v1.3.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -640,18 +637,20 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.2 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.29.0/go.mod h1:LsankqVDx4W+RhZNA5uWarULII/MBhF5qwCYxTuyXjs= go.opentelemetry.io/otel v1.4.0/go.mod h1:jeAqMFKy2uLIxCtKxoFj0FAL5zAPKQagc3+GtBWakzk= go.opentelemetry.io/otel v1.4.1/go.mod h1:StM6F/0fSwpd8dKWDCdRr7uRvEPYdW0hBSlbdTiUde4= -go.opentelemetry.io/otel v1.13.0 h1:1ZAKnNQKwBBxFtww/GwxNUyTf0AxkZzrukO8MeXqe4Y= -go.opentelemetry.io/otel v1.13.0/go.mod h1:FH3RtdZCzRkJYFTCsAKDy9l/XYjMdNv6QrkFFB8DvVg= +go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= +go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4= go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.1/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1 h1:WPpPsAAs8I2rA47v5u0558meKmmwm1Dj99ZbqCV8sZ8= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1/go.mod h1:o5RW5o2pKpJLD5dNTCmjF1DorYwMeFJmb/rKr5sLaa8= +go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= +go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4= go.opentelemetry.io/otel/sdk v1.4.1/go.mod h1:NBwHDgDIBYjwK2WNu1OPgsIc2IJzmBXNnvIJxJc8BpE= -go.opentelemetry.io/otel/sdk v1.13.0 h1:BHib5g8MvdqS65yo2vV1s6Le42Hm6rrw08qU6yz5JaM= -go.opentelemetry.io/otel/sdk v1.13.0/go.mod h1:YLKPx5+6Vx/o1TCUYYs+bpymtkmazOMT6zoRrC7AQ7I= +go.opentelemetry.io/otel/sdk v1.16.0 h1:Z1Ok1YsijYL0CSJpHt4cS3wDDh7p572grzNrBMiMWgE= +go.opentelemetry.io/otel/sdk v1.16.0/go.mod h1:tMsIuKXuuIWPBAOrH+eHtvhTL+SntFtXF9QD68aP6p4= go.opentelemetry.io/otel/trace v1.4.0/go.mod h1:uc3eRsqDfWs9R7b92xbQbU42/eTNz4N+gLP8qJCi4aE= go.opentelemetry.io/otel/trace v1.4.1/go.mod h1:iYEVbroFCNut9QkwEczV9vMRPHNKSSwYZjulEtsmhFc= -go.opentelemetry.io/otel/trace v1.13.0 h1:CBgRZ6ntv+Amuj1jDsMhZtlAPT6gbyIRdaIzFhfBSdY= -go.opentelemetry.io/otel/trace v1.13.0/go.mod h1:muCvmmO9KKpvuXSf3KKAXXB2ygNYHQ+ZfI5X08d3tds= +go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= +go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.12.0 h1:CMJ/3Wp7iOWES+CYLfnBv+DVmPbB+kmy9PJ92XvlR6c= go.opentelemetry.io/proto/otlp v0.12.0/go.mod h1:TsIjwGWIx5VFYv9KGVlOpxoBl5Dy+63SUguV7GGvlSQ= @@ -659,7 +658,6 @@ 
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= @@ -671,13 +669,16 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM= +golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -688,6 +689,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20230307190834-24139beb5833 h1:SChBja7BCQewoTAU7IgvucQKMIXrEpFxNMs0spT3/5s= +golang.org/x/exp v0.0.0-20230307190834-24139beb5833/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -700,7 +703,6 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl 
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -709,8 +711,9 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA= -golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= +golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -749,8 +752,10 @@ golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.6.0 h1:L4ZwwTvKW9gr0ZMS1yrHD9GZhIuVjOBBnaKH+SPQK0Q= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.11.0 h1:Gi2tvZIJyBtO9SDr1q9h5hEQCp/4L2RQ+ar0qjx2oNU= +golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -758,8 +763,8 @@ golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M= 
-golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= +golang.org/x/oauth2 v0.9.0 h1:BPpt2kU7oMRq3kCHAA1tbSEshXRw1LpG2ztgDwrzuAs= +golang.org/x/oauth2 v0.9.0/go.mod h1:qYgFZaFiu6Wg24azG8bdV52QJXJGbZzIIsRCdVKzbLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -770,9 +775,9 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -808,6 +813,7 @@ golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -815,6 +821,7 @@ golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -823,15 
+830,20 @@ golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.0.0-20220906165534-d0df966e6959/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= +golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.9.0 h1:GRRCnKYhdQrD8kfRAdQ6Zcw1P0OcELxGLKJvtjVMZ28= +golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -840,13 +852,15 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58= +golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.2.0 h1:52I/1L54xyEQAYdtcSuxtiT84KGYTBGXwayxmIpNJhE= -golang.org/x/time v0.2.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -862,7 +876,6 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -890,9 +903,9 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210112230658-8b4aab62c064/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.5.0 h1:+bSpV5HIeWkuvgaMfI3UmKRThoTA5ODJTUd8T17NO+4= -golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= +golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -916,8 +929,8 @@ google.golang.org/api v0.25.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.107.0 h1:I2SlFjD8ZWabaIFOfeEDg3pf0BHJDh6iYQ1ic3Yu/UU= -google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.128.0 h1:RjPESny5CnQRn9V6siglged+DZCgfu9l6mO9dkX9VOg= +google.golang.org/api v0.128.0/go.mod h1:Y611qgqaE92On/7g65MQgxYul3c0rEB894kniWLY750= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -959,8 +972,12 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D google.golang.org/genproto 
v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230109162033-3c3c17ce83e6 h1:uUn6GsgKK2eCI0bWeRMgRCcqDaQXYDuB+5tXA5Xeg/8= -google.golang.org/genproto v0.0.0-20230109162033-3c3c17ce83e6/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc h1:8DyZCyvI8mE1IdLy/60bS+52xfymkE72wv1asokgtao= +google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64= +google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc h1:kVKPf/IiYSBWEWtkIn6wZXwWGCnLKcC8oWfZvXjsGnM= +google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -979,8 +996,10 @@ google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U= -google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI= +google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -994,14 +1013,16 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod 
h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alexcesaro/statsd.v2 v2.0.0 h1:FXkZSCZIH17vLCO5sO2UucTHsH9pc+17F6pl3JVCwMc= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= @@ -1030,25 +1051,25 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.25.4 h1:3YO8J4RtmG7elEgaWMb4HgmpS2CfY1QlaOz9nwB+ZSs= -k8s.io/api v0.25.4/go.mod h1:IG2+RzyPQLllQxnhzD8KQNEu4c4YvyDTpSMztf4A0OQ= -k8s.io/apimachinery v0.25.4 h1:CtXsuaitMESSu339tfhVXhQrPET+EiWnIY1rcurKnAc= -k8s.io/apimachinery v0.25.4/go.mod h1:jaF9C/iPNM1FuLl7Zuy5b9v+n35HGSh6AQ4HYRkCqwo= -k8s.io/client-go v0.25.4 h1:3RNRDffAkNU56M/a7gUfXaEzdhZlYhoW8dgViGy5fn8= -k8s.io/client-go v0.25.4/go.mod h1:8trHCAC83XKY0wsBIpbirZU4NTUpbuhc2JnI7OruGZw= -k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= -k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E= -k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= -k8s.io/utils v0.0.0-20221012122500-cfd413dd9e85 h1:cTdVh7LYu82xeClmfzGtgyspNh6UxpwLWGi8R4sspNo= -k8s.io/utils v0.0.0-20221012122500-cfd413dd9e85/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -knative.dev/pkg v0.0.0-20221123011842-b78020c16606 h1:bHozIhJtQE9/SVQkVX2vqnoJGJt6zo7J0jnkLFX3Crw= -knative.dev/pkg v0.0.0-20221123011842-b78020c16606/go.mod h1:DMTRDJ5WRxf/DrlOPzohzfhSuJggscLZ8EavOq9O/x8= +k8s.io/api v0.27.1 h1:Z6zUGQ1Vd10tJ+gHcNNNgkV5emCyW+v2XTmn+CLjSd0= +k8s.io/api v0.27.1/go.mod h1:z5g/BpAiD+f6AArpqNjkY+cji8ueZDU/WV1jcj5Jk4E= +k8s.io/apimachinery v0.27.1 h1:EGuZiLI95UQQcClhanryclaQE6xjg1Bts6/L3cD7zyc= +k8s.io/apimachinery v0.27.1/go.mod h1:5ikh59fK3AJ287GUvpUsryoMFtH9zj/ARfWCo3AyXTM= +k8s.io/client-go v0.27.1 h1:oXsfhW/qncM1wDmWBIuDzRHNS2tLhK3BZv512Nc59W8= +k8s.io/client-go v0.27.1/go.mod h1:f8LHMUkVb3b9N8bWturc+EDtVVVwZ7ueTVquFAJb2vA= +k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= +k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20230515203736-54b630e78af5 h1:azYPdzztXxPSa8wb+hksEKayiz0o+PPisO/d+QhWnoo= +k8s.io/kube-openapi v0.0.0-20230515203736-54b630e78af5/go.mod h1:kzo02I3kQ4BTtEfVLaPbjvCkX97YqGve33wzlb3fofQ= +k8s.io/utils v0.0.0-20230505201702-9f6742963106 h1:EObNQ3TW2D+WptiYXlApGNLVy0zm/JIBVY9i+M4wpAU= +k8s.io/utils 
v0.0.0-20230505201702-9f6742963106/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +knative.dev/pkg v0.0.0-20230418073056-dfad48eaa5d0 h1:EFQcoUo8I4bc+U3y6tR1B3ONYZSHWUdAfI7Vh7dae8g= +knative.dev/pkg v0.0.0-20230418073056-dfad48eaa5d0/go.mod h1:2qWPP9Gjh9Q7ETti+WRHnBnGCSCq+6q7m3p/nmUQviE= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/compute/internal/version.go index c9ba9182..7513e24c 100644 --- a/vendor/cloud.google.com/go/compute/internal/version.go +++ b/vendor/cloud.google.com/go/compute/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.14.0" +const Version = "1.19.3" diff --git a/vendor/cloud.google.com/go/iam/CHANGES.md b/vendor/cloud.google.com/go/iam/CHANGES.md index ced21782..d18a339a 100644 --- a/vendor/cloud.google.com/go/iam/CHANGES.md +++ b/vendor/cloud.google.com/go/iam/CHANGES.md @@ -1,5 +1,62 @@ # Changes + +## [1.1.0](https://github.com/googleapis/google-cloud-go/compare/iam/v1.0.1...iam/v1.1.0) (2023-05-30) + + +### Features + +* **iam:** Update all direct dependencies ([b340d03](https://github.com/googleapis/google-cloud-go/commit/b340d030f2b52a4ce48846ce63984b28583abde6)) + +## [1.0.1](https://github.com/googleapis/google-cloud-go/compare/iam/v1.0.0...iam/v1.0.1) (2023-05-08) + + +### Bug Fixes + +* **iam:** Update grpc to v1.55.0 ([1147ce0](https://github.com/googleapis/google-cloud-go/commit/1147ce02a990276ca4f8ab7a1ab65c14da4450ef)) + +## [1.0.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.13.0...iam/v1.0.0) (2023-04-04) + + +### Features + +* **iam:** Promote to GA ([#7627](https://github.com/googleapis/google-cloud-go/issues/7627)) ([b351906](https://github.com/googleapis/google-cloud-go/commit/b351906a10e17a02d7f7e2551bc1585fd9dc3742)) + +## [0.13.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.12.0...iam/v0.13.0) (2023-03-15) + + +### Features + +* **iam:** Update iam and longrunning deps ([91a1f78](https://github.com/googleapis/google-cloud-go/commit/91a1f784a109da70f63b96414bba8a9b4254cddd)) + +## [0.12.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.11.0...iam/v0.12.0) (2023-02-17) + + +### Features + +* **iam:** Migrate to new stubs ([a61ddcd](https://github.com/googleapis/google-cloud-go/commit/a61ddcd3041c7af4a15109dc4431f9b327c497fb)) + +## [0.11.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.10.0...iam/v0.11.0) (2023-02-16) + + +### Features + +* **iam:** Start generating proto stubs 
([970d763](https://github.com/googleapis/google-cloud-go/commit/970d763531b54b2bc75d7ff26a20b6e05150cab8)) + +## [0.10.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.9.0...iam/v0.10.0) (2023-01-04) + + +### Features + +* **iam:** Add REST client ([06a54a1](https://github.com/googleapis/google-cloud-go/commit/06a54a16a5866cce966547c51e203b9e09a25bc0)) + +## [0.9.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.8.0...iam/v0.9.0) (2022-12-15) + + +### Features + +* **iam:** Rewrite iam sigs and update proto import ([#7137](https://github.com/googleapis/google-cloud-go/issues/7137)) ([ad67fa3](https://github.com/googleapis/google-cloud-go/commit/ad67fa36c263c161226f7fecbab5221592374dca)) + ## [0.8.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.7.0...iam/v0.8.0) (2022-12-05) diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go index 2793098a..fdcca8a5 100644 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 -// protoc v3.21.5 +// protoc-gen-go v1.30.0 +// protoc v4.23.1 // source: google/iam/v1/iam_policy.proto package iampb @@ -342,37 +342,37 @@ var file_google_iam_v1_iam_policy_proto_rawDesc = []byte{ 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x23, 0x22, 0x1e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x3a, 0x01, 0x2a, 0x12, 0x74, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, + 0x02, 0x23, 0x3a, 0x01, 0x2a, 0x22, 0x1e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x74, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x22, 0x1e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, - 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0x12, 0x9a, 0x01, 0x0a, 0x12, + 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x3a, 0x01, 0x2a, 0x22, 0x1e, 0x2f, 0x76, 0x31, + 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x67, + 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x9a, 0x01, 0x0a, 0x12, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 
0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x29, 0x22, - 0x24, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x2a, - 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x01, 0x2a, 0x1a, 0x1e, 0xca, 0x41, 0x1b, 0x69, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x29, 0x3a, + 0x01, 0x2a, 0x22, 0x24, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, + 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x1e, 0xca, 0x41, 0x1b, 0x69, 0x61, 0x6d, 0x2d, 0x6d, 0x65, 0x74, 0x61, 0x2d, 0x61, 0x70, 0x69, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x42, 0x86, 0x01, 0x0a, 0x11, 0x63, 0x6f, 0x6d, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0e, - 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x30, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, - 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x3b, 0x69, - 0x61, 0x6d, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, - 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, - 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x42, 0x7f, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x49, + 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x69, + 0x61, 0x6d, 0x70, 0x62, 0x3b, 0x69, 0x61, 0x6d, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, + 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go index 835f2171..7c91cc59 100644 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.26.0 -// protoc v3.21.5 +// protoc-gen-go v1.30.0 +// protoc v4.23.1 // source: google/iam/v1/options.proto package iampb @@ -111,16 +111,16 @@ var file_google_iam_v1_options_proto_rawDesc = []byte{ 0x12, 0x38, 0x0a, 0x18, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x84, 0x01, 0x0a, 0x11, 0x63, - 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, - 0x42, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x30, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, - 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x3b, 0x69, - 0x61, 0x6d, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, - 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, - 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x69, 0x63, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x7d, 0x0a, 0x11, 0x63, 0x6f, + 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, + 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x69, + 0x61, 0x6d, 0x70, 0x62, 0x3b, 0x69, 0x61, 0x6d, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, + 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go index ec7777a7..8b82b0f5 100644 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 -// protoc v3.21.5 +// protoc-gen-go v1.30.0 +// protoc v4.23.1 // source: google/iam/v1/policy.proto package iampb @@ -214,7 +214,8 @@ func (AuditConfigDelta_Action) EnumDescriptor() ([]byte, []int) { // only if the expression evaluates to `true`. A condition can add constraints // based on attributes of the request, the resource, or both. To learn which // resources support conditions in their IAM policies, see the -// [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). +// [IAM +// documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
// // **JSON example:** // @@ -237,7 +238,8 @@ func (AuditConfigDelta_Action) EnumDescriptor() ([]byte, []int) { // "condition": { // "title": "expirable access", // "description": "Does not grant access after Sep 2020", -// "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", +// "expression": "request.time < +// timestamp('2020-10-01T00:00:00.000Z')", // } // } // ], @@ -279,11 +281,11 @@ type Policy struct { // Any operation that affects conditional role bindings must specify version // `3`. This requirement applies to the following operations: // - // - Getting a policy that includes a conditional role binding - // - Adding a conditional role binding to a policy - // - Changing a conditional role binding in a policy - // - Removing any role binding, with or without a condition, from a policy - // that includes conditions + // * Getting a policy that includes a conditional role binding + // * Adding a conditional role binding to a policy + // * Changing a conditional role binding in a policy + // * Removing any role binding, with or without a condition, from a policy + // that includes conditions // // **Important:** If you use IAM Conditions, you must include the `etag` field // whenever you call `setIamPolicy`. If you omit this field, then IAM allows @@ -294,7 +296,8 @@ type Policy struct { // specify any valid version or leave the field unset. // // To learn which resources support conditions in their IAM policies, see the - // [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). + // [IAM + // documentation](https://cloud.google.com/iam/help/conditions/resource-policies). Version int32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` // Associates a list of `members`, or principals, with a `role`. Optionally, // may specify a `condition` that determines how and when the `bindings` are @@ -396,43 +399,47 @@ type Binding struct { // Specifies the principals requesting access for a Cloud Platform resource. // `members` can have the following values: // - // - `allUsers`: A special identifier that represents anyone who is - // on the internet; with or without a Google account. + // * `allUsers`: A special identifier that represents anyone who is + // on the internet; with or without a Google account. + // + // * `allAuthenticatedUsers`: A special identifier that represents anyone + // who is authenticated with a Google account or a service account. + // + // * `user:{emailid}`: An email address that represents a specific Google + // account. For example, `alice@example.com` . + // + // + // * `serviceAccount:{emailid}`: An email address that represents a service + // account. For example, `my-other-app@appspot.gserviceaccount.com`. // - // - `allAuthenticatedUsers`: A special identifier that represents anyone - // who is authenticated with a Google account or a service account. + // * `group:{emailid}`: An email address that represents a Google group. + // For example, `admins@example.com`. // - // - `user:{emailid}`: An email address that represents a specific Google - // account. For example, `alice@example.com` . + // * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique + // identifier) representing a user that has been recently deleted. For + // example, `alice@example.com?uid=123456789012345678901`. If the user is + // recovered, this value reverts to `user:{emailid}` and the recovered user + // retains the role in the binding. 
// - // - `serviceAccount:{emailid}`: An email address that represents a service - // account. For example, `my-other-app@appspot.gserviceaccount.com`. + // * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus + // unique identifier) representing a service account that has been recently + // deleted. For example, + // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. + // If the service account is undeleted, this value reverts to + // `serviceAccount:{emailid}` and the undeleted service account retains the + // role in the binding. // - // - `group:{emailid}`: An email address that represents a Google group. - // For example, `admins@example.com`. + // * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique + // identifier) representing a Google group that has been recently + // deleted. For example, `admins@example.com?uid=123456789012345678901`. If + // the group is recovered, this value reverts to `group:{emailid}` and the + // recovered group retains the role in the binding. // - // - `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique - // identifier) representing a user that has been recently deleted. For - // example, `alice@example.com?uid=123456789012345678901`. If the user is - // recovered, this value reverts to `user:{emailid}` and the recovered user - // retains the role in the binding. // - // - `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus - // unique identifier) representing a service account that has been recently - // deleted. For example, - // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. - // If the service account is undeleted, this value reverts to - // `serviceAccount:{emailid}` and the undeleted service account retains the - // role in the binding. + // * `domain:{domain}`: The G Suite domain (primary) that represents all the + // users of that domain. For example, `google.com` or `example.com`. // - // - `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique - // identifier) representing a Google group that has been recently - // deleted. For example, `admins@example.com?uid=123456789012345678901`. If - // the group is recovered, this value reverts to `group:{emailid}` and the - // recovered group retains the role in the binding. // - // - `domain:{domain}`: The G Suite domain (primary) that represents all the - // users of that domain. For example, `google.com` or `example.com`. Members []string `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"` // The condition that is associated with this binding. // @@ -640,7 +647,8 @@ type AuditLogConfig struct { LogType AuditLogConfig_LogType `protobuf:"varint,1,opt,name=log_type,json=logType,proto3,enum=google.iam.v1.AuditLogConfig_LogType" json:"log_type,omitempty"` // Specifies the identities that do not cause logging for this type of // permission. - // Follows the same format of [Binding.members][google.iam.v1.Binding.members]. + // Follows the same format of + // [Binding.members][google.iam.v1.Binding.members]. 
ExemptedMembers []string `protobuf:"bytes,2,rep,name=exempted_members,json=exemptedMembers,proto3" json:"exempted_members,omitempty"` } @@ -999,16 +1007,15 @@ var file_google_iam_v1_policy_proto_rawDesc = []byte{ 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x44, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x52, 0x45, 0x4d, 0x4f, 0x56, - 0x45, 0x10, 0x02, 0x42, 0x83, 0x01, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x30, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, - 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x3b, 0x69, 0x61, 0x6d, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, - 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, - 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, - 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x45, 0x10, 0x02, 0x42, 0x7c, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x69, 0x61, 0x6d, + 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x69, 0x61, 0x6d, 0x70, 0x62, 0x3b, 0x69, 0x61, 0x6d, + 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, + 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/cloud.google.com/go/iam/iam.go b/vendor/cloud.google.com/go/iam/iam.go index 0a06ea2e..f004a7af 100644 --- a/vendor/cloud.google.com/go/iam/iam.go +++ b/vendor/cloud.google.com/go/iam/iam.go @@ -26,8 +26,8 @@ import ( "fmt" "time" + pb "cloud.google.com/go/iam/apiv1/iampb" gax "github.com/googleapis/gax-go/v2" - pb "google.golang.org/genproto/googleapis/iam/v1" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" diff --git a/vendor/cloud.google.com/go/kms/apiv1/doc.go b/vendor/cloud.google.com/go/kms/apiv1/doc.go index 8c0d7430..e701dab3 100644 --- a/vendor/cloud.google.com/go/kms/apiv1/doc.go +++ b/vendor/cloud.google.com/go/kms/apiv1/doc.go @@ -20,6 +20,11 @@ // Manages keys and performs cryptographic operations in a central cloud // service, for direct use by other cloud resources and applications. // +// # General documentation +// +// For information about setting deadlines, reusing contexts, and more +// please visit https://pkg.go.dev/cloud.google.com/go. +// // # Example usage // // To get started with this package, create a client. 
@@ -73,6 +78,11 @@ // _ = resp // } // +// # Inspecting errors +// +// To see examples of how to inspect errors returned by this package please reference +// [Inspecting errors](https://pkg.go.dev/cloud.google.com/go#hdr-Inspecting_errors). +// // # Use of Context // // The ctx passed to NewEkmClient is used for authentication requests and @@ -80,20 +90,11 @@ // Individual methods on the client use the ctx given to them. // // To close the open connection, use the Close() method. -// -// For information about setting deadlines, reusing contexts, and more -// please visit https://pkg.go.dev/cloud.google.com/go. package kms // import "cloud.google.com/go/kms/apiv1" import ( "context" - "fmt" "net/http" - "os" - "runtime" - "strconv" - "strings" - "unicode" "google.golang.org/api/option" "google.golang.org/grpc/metadata" @@ -124,16 +125,6 @@ func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { return metadata.NewOutgoingContext(ctx, out) } -func checkDisableDeadlines() (bool, error) { - raw, ok := os.LookupEnv("GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE") - if !ok { - return false, nil - } - - b, err := strconv.ParseBool(raw) - return b, err -} - // DefaultAuthScopes reports the default set of authentication scopes to use with this package. func DefaultAuthScopes() []string { return []string{ @@ -142,52 +133,6 @@ func DefaultAuthScopes() []string { } } -// versionGo returns the Go runtime version. The returned string -// has no whitespace, suitable for reporting in header. -func versionGo() string { - const develPrefix = "devel +" - - s := runtime.Version() - if strings.HasPrefix(s, develPrefix) { - s = s[len(develPrefix):] - if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { - s = s[:p] - } - return s - } - - notSemverRune := func(r rune) bool { - return !strings.ContainsRune("0123456789.", r) - } - - if strings.HasPrefix(s, "go1") { - s = s[2:] - var prerelease string - if p := strings.IndexFunc(s, notSemverRune); p >= 0 { - s, prerelease = s[:p], s[p:] - } - if strings.HasSuffix(s, ".") { - s += "0" - } else if strings.Count(s, ".") < 2 { - s += ".0" - } - if prerelease != "" { - s += "-" + prerelease - } - return s - } - return "UNKNOWN" -} - -// maybeUnknownEnum wraps the given proto-JSON parsing error if it is the result -// of receiving an unknown enum value. -func maybeUnknownEnum(err error) error { - if strings.Contains(err.Error(), "invalid value for enum type") { - err = fmt.Errorf("received an unknown enum value; a later version of the library may support it: %w", err) - } - return err -} - // buildHeaders extracts metadata from the outgoing context, joins it with any other // given metadata, and converts them into a http.Header. 
func buildHeaders(ctx context.Context, mds ...metadata.MD) http.Header { diff --git a/vendor/cloud.google.com/go/kms/apiv1/ekm_client.go b/vendor/cloud.google.com/go/kms/apiv1/ekm_client.go index 8cc29e70..fb8de925 100644 --- a/vendor/cloud.google.com/go/kms/apiv1/ekm_client.go +++ b/vendor/cloud.google.com/go/kms/apiv1/ekm_client.go @@ -20,12 +20,13 @@ import ( "bytes" "context" "fmt" - "io/ioutil" + "io" "math" "net/http" "net/url" "time" + iampb "cloud.google.com/go/iam/apiv1/iampb" kmspb "cloud.google.com/go/kms/apiv1/kmspb" gax "github.com/googleapis/gax-go/v2" "google.golang.org/api/googleapi" @@ -35,7 +36,6 @@ import ( gtransport "google.golang.org/api/transport/grpc" httptransport "google.golang.org/api/transport/http" locationpb "google.golang.org/genproto/googleapis/cloud/location" - iampb "google.golang.org/genproto/googleapis/iam/v1" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" @@ -51,6 +51,9 @@ type EkmCallOptions struct { GetEkmConnection []gax.CallOption CreateEkmConnection []gax.CallOption UpdateEkmConnection []gax.CallOption + GetEkmConfig []gax.CallOption + UpdateEkmConfig []gax.CallOption + VerifyConnectivity []gax.CallOption GetLocation []gax.CallOption ListLocations []gax.CallOption GetIamPolicy []gax.CallOption @@ -73,6 +76,7 @@ func defaultEkmGRPCClientOptions() []option.ClientOption { func defaultEkmCallOptions() *EkmCallOptions { return &EkmCallOptions{ ListEkmConnections: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.Unavailable, @@ -85,6 +89,7 @@ func defaultEkmCallOptions() *EkmCallOptions { }), }, GetEkmConnection: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.Unavailable, @@ -97,6 +102,7 @@ func defaultEkmCallOptions() *EkmCallOptions { }), }, CreateEkmConnection: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.Unavailable, @@ -109,6 +115,7 @@ func defaultEkmCallOptions() *EkmCallOptions { }), }, UpdateEkmConnection: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.Unavailable, @@ -120,6 +127,9 @@ func defaultEkmCallOptions() *EkmCallOptions { }) }), }, + GetEkmConfig: []gax.CallOption{}, + UpdateEkmConfig: []gax.CallOption{}, + VerifyConnectivity: []gax.CallOption{}, GetLocation: []gax.CallOption{}, ListLocations: []gax.CallOption{}, GetIamPolicy: []gax.CallOption{}, @@ -131,6 +141,7 @@ func defaultEkmCallOptions() *EkmCallOptions { func defaultEkmRESTCallOptions() *EkmCallOptions { return &EkmCallOptions{ ListEkmConnections: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnHTTPCodes(gax.Backoff{ Initial: 100 * time.Millisecond, @@ -142,6 +153,7 @@ func defaultEkmRESTCallOptions() *EkmCallOptions { }), }, GetEkmConnection: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnHTTPCodes(gax.Backoff{ Initial: 100 * time.Millisecond, @@ -153,6 +165,7 @@ func defaultEkmRESTCallOptions() *EkmCallOptions { }), }, CreateEkmConnection: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnHTTPCodes(gax.Backoff{ Initial: 100 * time.Millisecond, @@ -164,6 +177,7 @@ func defaultEkmRESTCallOptions() 
*EkmCallOptions { }), }, UpdateEkmConnection: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnHTTPCodes(gax.Backoff{ Initial: 100 * time.Millisecond, @@ -174,6 +188,9 @@ func defaultEkmRESTCallOptions() *EkmCallOptions { http.StatusGatewayTimeout) }), }, + GetEkmConfig: []gax.CallOption{}, + UpdateEkmConfig: []gax.CallOption{}, + VerifyConnectivity: []gax.CallOption{}, GetLocation: []gax.CallOption{}, ListLocations: []gax.CallOption{}, GetIamPolicy: []gax.CallOption{}, @@ -191,6 +208,9 @@ type internalEkmClient interface { GetEkmConnection(context.Context, *kmspb.GetEkmConnectionRequest, ...gax.CallOption) (*kmspb.EkmConnection, error) CreateEkmConnection(context.Context, *kmspb.CreateEkmConnectionRequest, ...gax.CallOption) (*kmspb.EkmConnection, error) UpdateEkmConnection(context.Context, *kmspb.UpdateEkmConnectionRequest, ...gax.CallOption) (*kmspb.EkmConnection, error) + GetEkmConfig(context.Context, *kmspb.GetEkmConfigRequest, ...gax.CallOption) (*kmspb.EkmConfig, error) + UpdateEkmConfig(context.Context, *kmspb.UpdateEkmConfigRequest, ...gax.CallOption) (*kmspb.EkmConfig, error) + VerifyConnectivity(context.Context, *kmspb.VerifyConnectivityRequest, ...gax.CallOption) (*kmspb.VerifyConnectivityResponse, error) GetLocation(context.Context, *locationpb.GetLocationRequest, ...gax.CallOption) (*locationpb.Location, error) ListLocations(context.Context, *locationpb.ListLocationsRequest, ...gax.CallOption) *LocationIterator GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) @@ -260,6 +280,27 @@ func (c *EkmClient) UpdateEkmConnection(ctx context.Context, req *kmspb.UpdateEk return c.internalClient.UpdateEkmConnection(ctx, req, opts...) } +// GetEkmConfig returns the EkmConfig singleton resource +// for a given project and location. +func (c *EkmClient) GetEkmConfig(ctx context.Context, req *kmspb.GetEkmConfigRequest, opts ...gax.CallOption) (*kmspb.EkmConfig, error) { + return c.internalClient.GetEkmConfig(ctx, req, opts...) +} + +// UpdateEkmConfig updates the EkmConfig singleton resource +// for a given project and location. +func (c *EkmClient) UpdateEkmConfig(ctx context.Context, req *kmspb.UpdateEkmConfigRequest, opts ...gax.CallOption) (*kmspb.EkmConfig, error) { + return c.internalClient.UpdateEkmConfig(ctx, req, opts...) +} + +// VerifyConnectivity verifies that Cloud KMS can successfully connect to the external key +// manager specified by an EkmConnection. +// If there is an error connecting to the EKM, this method returns a +// FAILED_PRECONDITION status containing structured information as described +// at https://cloud.google.com/kms/docs/reference/ekm_errors (at https://cloud.google.com/kms/docs/reference/ekm_errors). +func (c *EkmClient) VerifyConnectivity(ctx context.Context, req *kmspb.VerifyConnectivityRequest, opts ...gax.CallOption) (*kmspb.VerifyConnectivityResponse, error) { + return c.internalClient.VerifyConnectivity(ctx, req, opts...) +} + // GetLocation gets information about a location. func (c *EkmClient) GetLocation(ctx context.Context, req *locationpb.GetLocationRequest, opts ...gax.CallOption) (*locationpb.Location, error) { return c.internalClient.GetLocation(ctx, req, opts...) @@ -303,9 +344,6 @@ type ekmGRPCClient struct { // Connection pool of gRPC connections to the service. 
connPool gtransport.ConnPool - // flag to opt out of default deadlines via GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE - disableDeadlines bool - // Points back to the CallOptions field of the containing EkmClient CallOptions **EkmCallOptions @@ -339,11 +377,6 @@ func NewEkmClient(ctx context.Context, opts ...option.ClientOption) (*EkmClient, clientOpts = append(clientOpts, hookOpts...) } - disableDeadlines, err := checkDisableDeadlines() - if err != nil { - return nil, err - } - connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) if err != nil { return nil, err @@ -351,12 +384,11 @@ func NewEkmClient(ctx context.Context, opts ...option.ClientOption) (*EkmClient, client := EkmClient{CallOptions: defaultEkmCallOptions()} c := &ekmGRPCClient{ - connPool: connPool, - disableDeadlines: disableDeadlines, - ekmClient: kmspb.NewEkmServiceClient(connPool), - CallOptions: &client.CallOptions, - iamPolicyClient: iampb.NewIAMPolicyClient(connPool), - locationsClient: locationpb.NewLocationsClient(connPool), + connPool: connPool, + ekmClient: kmspb.NewEkmServiceClient(connPool), + CallOptions: &client.CallOptions, + iamPolicyClient: iampb.NewIAMPolicyClient(connPool), + locationsClient: locationpb.NewLocationsClient(connPool), } c.setGoogleClientInfo() @@ -377,7 +409,7 @@ func (c *ekmGRPCClient) Connection() *grpc.ClientConn { // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. func (c *ekmGRPCClient) setGoogleClientInfo(keyval ...string) { - kv := append([]string{"gl-go", versionGo()}, keyval...) + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) } @@ -442,7 +474,7 @@ func defaultEkmRESTClientOptions() []option.ClientOption { // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. func (c *ekmRESTClient) setGoogleClientInfo(keyval ...string) { - kv := append([]string{"gl-go", versionGo()}, keyval...) + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) 
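Reviewer note: the GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE plumbing (disableDeadlines) is dropped here; the 60s default now comes from gax.WithTimeout in the default call options instead of a hand-rolled context.WithTimeout per method. Under the usual gax-go v2 semantics (an assumption, not shown in this patch), a caller can still bound or override that default, as sketched below with a placeholder resource name.

```go
package main

import (
	"context"
	"log"
	"time"

	kms "cloud.google.com/go/kms/apiv1"
	"cloud.google.com/go/kms/apiv1/kmspb"
	gax "github.com/googleapis/gax-go/v2"
)

func main() {
	ctx := context.Background()

	client, err := kms.NewEkmClient(ctx)
	if err != nil {
		log.Fatalf("NewEkmClient: %v", err)
	}
	defer client.Close()

	// A caller deadline shorter than the 60s default still wins: the
	// effective deadline is the earlier of the two.
	shortCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()

	req := &kmspb.GetEkmConnectionRequest{
		Name: "projects/my-project/locations/us-east1/ekmConnections/my-conn", // placeholder
	}

	// A per-call gax.WithTimeout overrides the default attached in
	// defaultEkmCallOptions, since later options take precedence.
	if _, err := client.GetEkmConnection(shortCtx, req, gax.WithTimeout(10*time.Second)); err != nil {
		log.Printf("GetEkmConnection: %v", err)
	}
}
```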
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) } @@ -507,11 +539,6 @@ func (c *ekmGRPCClient) ListEkmConnections(ctx context.Context, req *kmspb.ListE } func (c *ekmGRPCClient) GetEkmConnection(ctx context.Context, req *kmspb.GetEkmConnectionRequest, opts ...gax.CallOption) (*kmspb.EkmConnection, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) - defer cancel() - ctx = cctx - } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) ctx = insertMetadata(ctx, c.xGoogMetadata, md) @@ -529,11 +556,6 @@ func (c *ekmGRPCClient) GetEkmConnection(ctx context.Context, req *kmspb.GetEkmC } func (c *ekmGRPCClient) CreateEkmConnection(ctx context.Context, req *kmspb.CreateEkmConnectionRequest, opts ...gax.CallOption) (*kmspb.EkmConnection, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) - defer cancel() - ctx = cctx - } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) ctx = insertMetadata(ctx, c.xGoogMetadata, md) @@ -551,11 +573,6 @@ func (c *ekmGRPCClient) CreateEkmConnection(ctx context.Context, req *kmspb.Crea } func (c *ekmGRPCClient) UpdateEkmConnection(ctx context.Context, req *kmspb.UpdateEkmConnectionRequest, opts ...gax.CallOption) (*kmspb.EkmConnection, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) - defer cancel() - ctx = cctx - } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "ekm_connection.name", url.QueryEscape(req.GetEkmConnection().GetName()))) ctx = insertMetadata(ctx, c.xGoogMetadata, md) @@ -572,6 +589,57 @@ func (c *ekmGRPCClient) UpdateEkmConnection(ctx context.Context, req *kmspb.Upda return resp, nil } +func (c *ekmGRPCClient) GetEkmConfig(ctx context.Context, req *kmspb.GetEkmConfigRequest, opts ...gax.CallOption) (*kmspb.EkmConfig, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).GetEkmConfig[0:len((*c.CallOptions).GetEkmConfig):len((*c.CallOptions).GetEkmConfig)], opts...) + var resp *kmspb.EkmConfig + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.ekmClient.GetEkmConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *ekmGRPCClient) UpdateEkmConfig(ctx context.Context, req *kmspb.UpdateEkmConfigRequest, opts ...gax.CallOption) (*kmspb.EkmConfig, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "ekm_config.name", url.QueryEscape(req.GetEkmConfig().GetName()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).UpdateEkmConfig[0:len((*c.CallOptions).UpdateEkmConfig):len((*c.CallOptions).UpdateEkmConfig)], opts...) + var resp *kmspb.EkmConfig + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.ekmClient.UpdateEkmConfig(ctx, req, settings.GRPC...) + return err + }, opts...) 
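Reviewer note: the switch from `string(updateMask)` to `string(updateMask[1:len(updateMask)-1])` looks odd at first glance. It strips the surrounding double quotes, because protojson renders a google.protobuf.FieldMask as a single quoted JSON string. A standalone sketch (not part of this patch, with an illustrative path) shows the effect:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/fieldmaskpb"
)

func main() {
	mask := &fieldmaskpb.FieldMask{Paths: []string{"default_ekm_connection"}}

	// FieldMask has a custom JSON form: one quoted, comma-separated,
	// camelCased string.
	b, err := protojson.Marshal(mask)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // "defaultEkmConnection"  (quotes included)

	// Dropping the first and last byte leaves the bare value that belongs in
	// the ?updateMask= query parameter, which is exactly what the new slice
	// expression in the generated client does.
	fmt.Println(string(b[1 : len(b)-1])) // defaultEkmConnection
}
```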
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *ekmGRPCClient) VerifyConnectivity(ctx context.Context, req *kmspb.VerifyConnectivityRequest, opts ...gax.CallOption) (*kmspb.VerifyConnectivityResponse, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).VerifyConnectivity[0:len((*c.CallOptions).VerifyConnectivity):len((*c.CallOptions).VerifyConnectivity)], opts...) + var resp *kmspb.VerifyConnectivityResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.ekmClient.VerifyConnectivity(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + func (c *ekmGRPCClient) GetLocation(ctx context.Context, req *locationpb.GetLocationRequest, opts ...gax.CallOption) (*locationpb.Location, error) { md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) @@ -745,13 +813,13 @@ func (c *ekmRESTClient) ListEkmConnections(ctx context.Context, req *kmspb.ListE return err } - buf, err := ioutil.ReadAll(httpRsp.Body) + buf, err := io.ReadAll(httpRsp.Body) if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { - return maybeUnknownEnum(err) + return err } return nil @@ -821,13 +889,13 @@ func (c *ekmRESTClient) GetEkmConnection(ctx context.Context, req *kmspb.GetEkmC return err } - buf, err := ioutil.ReadAll(httpRsp.Body) + buf, err := io.ReadAll(httpRsp.Body) if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { - return maybeUnknownEnum(err) + return err } return nil @@ -888,13 +956,13 @@ func (c *ekmRESTClient) CreateEkmConnection(ctx context.Context, req *kmspb.Crea return err } - buf, err := ioutil.ReadAll(httpRsp.Body) + buf, err := io.ReadAll(httpRsp.Body) if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { - return maybeUnknownEnum(err) + return err } return nil @@ -927,7 +995,7 @@ func (c *ekmRESTClient) UpdateEkmConnection(ctx context.Context, req *kmspb.Upda if err != nil { return nil, err } - params.Add("updateMask", string(updateMask)) + params.Add("updateMask", string(updateMask[1:len(updateMask)-1])) } baseUrl.RawQuery = params.Encode() @@ -960,13 +1028,207 @@ func (c *ekmRESTClient) UpdateEkmConnection(ctx context.Context, req *kmspb.Upda return err } - buf, err := ioutil.ReadAll(httpRsp.Body) + buf, err := io.ReadAll(httpRsp.Body) if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { - return maybeUnknownEnum(err) + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// GetEkmConfig returns the EkmConfig singleton resource +// for a given project and location. +func (c *ekmRESTClient) GetEkmConfig(ctx context.Context, req *kmspb.GetEkmConfigRequest, opts ...gax.CallOption) (*kmspb.EkmConfig, error) { + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. 
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + headers := buildHeaders(ctx, c.xGoogMetadata, md, metadata.Pairs("Content-Type", "application/json")) + opts = append((*c.CallOptions).GetEkmConfig[0:len((*c.CallOptions).GetEkmConfig):len((*c.CallOptions).GetEkmConfig)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &kmspb.EkmConfig{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// UpdateEkmConfig updates the EkmConfig singleton resource +// for a given project and location. +func (c *ekmRESTClient) UpdateEkmConfig(ctx context.Context, req *kmspb.UpdateEkmConfigRequest, opts ...gax.CallOption) (*kmspb.EkmConfig, error) { + m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} + body := req.GetEkmConfig() + jsonReq, err := m.Marshal(body) + if err != nil { + return nil, err + } + + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetEkmConfig().GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + if req.GetUpdateMask() != nil { + updateMask, err := protojson.Marshal(req.GetUpdateMask()) + if err != nil { + return nil, err + } + params.Add("updateMask", string(updateMask[1:len(updateMask)-1])) + } + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "ekm_config.name", url.QueryEscape(req.GetEkmConfig().GetName()))) + + headers := buildHeaders(ctx, c.xGoogMetadata, md, metadata.Pairs("Content-Type", "application/json")) + opts = append((*c.CallOptions).UpdateEkmConfig[0:len((*c.CallOptions).UpdateEkmConfig):len((*c.CallOptions).UpdateEkmConfig)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &kmspb.EkmConfig{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("PATCH", baseUrl.String(), bytes.NewReader(jsonReq)) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// VerifyConnectivity verifies that Cloud KMS can successfully connect to the external key +// manager specified by an EkmConnection. 
+// If there is an error connecting to the EKM, this method returns a +// FAILED_PRECONDITION status containing structured information as described +// at https://cloud.google.com/kms/docs/reference/ekm_errors (at https://cloud.google.com/kms/docs/reference/ekm_errors). +func (c *ekmRESTClient) VerifyConnectivity(ctx context.Context, req *kmspb.VerifyConnectivityRequest, opts ...gax.CallOption) (*kmspb.VerifyConnectivityResponse, error) { + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v:verifyConnectivity", req.GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + headers := buildHeaders(ctx, c.xGoogMetadata, md, metadata.Pairs("Content-Type", "application/json")) + opts = append((*c.CallOptions).VerifyConnectivity[0:len((*c.CallOptions).VerifyConnectivity):len((*c.CallOptions).VerifyConnectivity)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &kmspb.VerifyConnectivityResponse{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err } return nil @@ -1018,13 +1280,13 @@ func (c *ekmRESTClient) GetLocation(ctx context.Context, req *locationpb.GetLoca return err } - buf, err := ioutil.ReadAll(httpRsp.Body) + buf, err := io.ReadAll(httpRsp.Body) if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { - return maybeUnknownEnum(err) + return err } return nil @@ -1092,13 +1354,13 @@ func (c *ekmRESTClient) ListLocations(ctx context.Context, req *locationpb.ListL return err } - buf, err := ioutil.ReadAll(httpRsp.Body) + buf, err := io.ReadAll(httpRsp.Body) if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { - return maybeUnknownEnum(err) + return err } return nil @@ -1171,13 +1433,13 @@ func (c *ekmRESTClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolic return err } - buf, err := ioutil.ReadAll(httpRsp.Body) + buf, err := io.ReadAll(httpRsp.Body) if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { - return maybeUnknownEnum(err) + return err } return nil @@ -1239,13 +1501,13 @@ func (c *ekmRESTClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolic return err } - buf, err := ioutil.ReadAll(httpRsp.Body) + buf, err := io.ReadAll(httpRsp.Body) if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { - return maybeUnknownEnum(err) + return err } return nil @@ -1309,13 +1571,13 @@ func (c *ekmRESTClient) TestIamPermissions(ctx context.Context, req *iampb.TestI return err } - buf, err := ioutil.ReadAll(httpRsp.Body) + buf, err := io.ReadAll(httpRsp.Body) if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { - return 
maybeUnknownEnum(err) + return err } return nil diff --git a/vendor/cloud.google.com/go/kms/apiv1/gapic_metadata.json b/vendor/cloud.google.com/go/kms/apiv1/gapic_metadata.json deleted file mode 100644 index a45d0041..00000000 --- a/vendor/cloud.google.com/go/kms/apiv1/gapic_metadata.json +++ /dev/null @@ -1,437 +0,0 @@ -{ - "schema": "1.0", - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods.", - "language": "go", - "protoPackage": "google.cloud.kms.v1", - "libraryPackage": "cloud.google.com/go/kms/apiv1", - "services": { - "EkmService": { - "clients": { - "grpc": { - "libraryClient": "EkmClient", - "rpcs": { - "CreateEkmConnection": { - "methods": [ - "CreateEkmConnection" - ] - }, - "GetEkmConnection": { - "methods": [ - "GetEkmConnection" - ] - }, - "GetIamPolicy": { - "methods": [ - "GetIamPolicy" - ] - }, - "GetLocation": { - "methods": [ - "GetLocation" - ] - }, - "ListEkmConnections": { - "methods": [ - "ListEkmConnections" - ] - }, - "ListLocations": { - "methods": [ - "ListLocations" - ] - }, - "SetIamPolicy": { - "methods": [ - "SetIamPolicy" - ] - }, - "TestIamPermissions": { - "methods": [ - "TestIamPermissions" - ] - }, - "UpdateEkmConnection": { - "methods": [ - "UpdateEkmConnection" - ] - } - } - }, - "rest": { - "libraryClient": "EkmClient", - "rpcs": { - "CreateEkmConnection": { - "methods": [ - "CreateEkmConnection" - ] - }, - "GetEkmConnection": { - "methods": [ - "GetEkmConnection" - ] - }, - "GetIamPolicy": { - "methods": [ - "GetIamPolicy" - ] - }, - "GetLocation": { - "methods": [ - "GetLocation" - ] - }, - "ListEkmConnections": { - "methods": [ - "ListEkmConnections" - ] - }, - "ListLocations": { - "methods": [ - "ListLocations" - ] - }, - "SetIamPolicy": { - "methods": [ - "SetIamPolicy" - ] - }, - "TestIamPermissions": { - "methods": [ - "TestIamPermissions" - ] - }, - "UpdateEkmConnection": { - "methods": [ - "UpdateEkmConnection" - ] - } - } - } - } - }, - "KeyManagementService": { - "clients": { - "grpc": { - "libraryClient": "KeyManagementClient", - "rpcs": { - "AsymmetricDecrypt": { - "methods": [ - "AsymmetricDecrypt" - ] - }, - "AsymmetricSign": { - "methods": [ - "AsymmetricSign" - ] - }, - "CreateCryptoKey": { - "methods": [ - "CreateCryptoKey" - ] - }, - "CreateCryptoKeyVersion": { - "methods": [ - "CreateCryptoKeyVersion" - ] - }, - "CreateImportJob": { - "methods": [ - "CreateImportJob" - ] - }, - "CreateKeyRing": { - "methods": [ - "CreateKeyRing" - ] - }, - "Decrypt": { - "methods": [ - "Decrypt" - ] - }, - "DestroyCryptoKeyVersion": { - "methods": [ - "DestroyCryptoKeyVersion" - ] - }, - "Encrypt": { - "methods": [ - "Encrypt" - ] - }, - "GenerateRandomBytes": { - "methods": [ - "GenerateRandomBytes" - ] - }, - "GetCryptoKey": { - "methods": [ - "GetCryptoKey" - ] - }, - "GetCryptoKeyVersion": { - "methods": [ - "GetCryptoKeyVersion" - ] - }, - "GetIamPolicy": { - "methods": [ - "GetIamPolicy" - ] - }, - "GetImportJob": { - "methods": [ - "GetImportJob" - ] - }, - "GetKeyRing": { - "methods": [ - "GetKeyRing" - ] - }, - "GetLocation": { - "methods": [ - "GetLocation" - ] - }, - "GetPublicKey": { - "methods": [ - "GetPublicKey" - ] - }, - "ImportCryptoKeyVersion": { - "methods": [ - "ImportCryptoKeyVersion" - ] - }, - "ListCryptoKeyVersions": { - "methods": [ - "ListCryptoKeyVersions" - ] - }, - "ListCryptoKeys": { - "methods": [ - "ListCryptoKeys" - ] - }, - "ListImportJobs": { - "methods": [ - "ListImportJobs" - ] - }, - "ListKeyRings": { - "methods": [ - "ListKeyRings" - ] - }, - "ListLocations": { - 
"methods": [ - "ListLocations" - ] - }, - "MacSign": { - "methods": [ - "MacSign" - ] - }, - "MacVerify": { - "methods": [ - "MacVerify" - ] - }, - "RestoreCryptoKeyVersion": { - "methods": [ - "RestoreCryptoKeyVersion" - ] - }, - "SetIamPolicy": { - "methods": [ - "SetIamPolicy" - ] - }, - "TestIamPermissions": { - "methods": [ - "TestIamPermissions" - ] - }, - "UpdateCryptoKey": { - "methods": [ - "UpdateCryptoKey" - ] - }, - "UpdateCryptoKeyPrimaryVersion": { - "methods": [ - "UpdateCryptoKeyPrimaryVersion" - ] - }, - "UpdateCryptoKeyVersion": { - "methods": [ - "UpdateCryptoKeyVersion" - ] - } - } - }, - "rest": { - "libraryClient": "KeyManagementClient", - "rpcs": { - "AsymmetricDecrypt": { - "methods": [ - "AsymmetricDecrypt" - ] - }, - "AsymmetricSign": { - "methods": [ - "AsymmetricSign" - ] - }, - "CreateCryptoKey": { - "methods": [ - "CreateCryptoKey" - ] - }, - "CreateCryptoKeyVersion": { - "methods": [ - "CreateCryptoKeyVersion" - ] - }, - "CreateImportJob": { - "methods": [ - "CreateImportJob" - ] - }, - "CreateKeyRing": { - "methods": [ - "CreateKeyRing" - ] - }, - "Decrypt": { - "methods": [ - "Decrypt" - ] - }, - "DestroyCryptoKeyVersion": { - "methods": [ - "DestroyCryptoKeyVersion" - ] - }, - "Encrypt": { - "methods": [ - "Encrypt" - ] - }, - "GenerateRandomBytes": { - "methods": [ - "GenerateRandomBytes" - ] - }, - "GetCryptoKey": { - "methods": [ - "GetCryptoKey" - ] - }, - "GetCryptoKeyVersion": { - "methods": [ - "GetCryptoKeyVersion" - ] - }, - "GetIamPolicy": { - "methods": [ - "GetIamPolicy" - ] - }, - "GetImportJob": { - "methods": [ - "GetImportJob" - ] - }, - "GetKeyRing": { - "methods": [ - "GetKeyRing" - ] - }, - "GetLocation": { - "methods": [ - "GetLocation" - ] - }, - "GetPublicKey": { - "methods": [ - "GetPublicKey" - ] - }, - "ImportCryptoKeyVersion": { - "methods": [ - "ImportCryptoKeyVersion" - ] - }, - "ListCryptoKeyVersions": { - "methods": [ - "ListCryptoKeyVersions" - ] - }, - "ListCryptoKeys": { - "methods": [ - "ListCryptoKeys" - ] - }, - "ListImportJobs": { - "methods": [ - "ListImportJobs" - ] - }, - "ListKeyRings": { - "methods": [ - "ListKeyRings" - ] - }, - "ListLocations": { - "methods": [ - "ListLocations" - ] - }, - "MacSign": { - "methods": [ - "MacSign" - ] - }, - "MacVerify": { - "methods": [ - "MacVerify" - ] - }, - "RestoreCryptoKeyVersion": { - "methods": [ - "RestoreCryptoKeyVersion" - ] - }, - "SetIamPolicy": { - "methods": [ - "SetIamPolicy" - ] - }, - "TestIamPermissions": { - "methods": [ - "TestIamPermissions" - ] - }, - "UpdateCryptoKey": { - "methods": [ - "UpdateCryptoKey" - ] - }, - "UpdateCryptoKeyPrimaryVersion": { - "methods": [ - "UpdateCryptoKeyPrimaryVersion" - ] - }, - "UpdateCryptoKeyVersion": { - "methods": [ - "UpdateCryptoKeyVersion" - ] - } - } - } - } - } - } -} diff --git a/vendor/cloud.google.com/go/kms/apiv1/iam.go b/vendor/cloud.google.com/go/kms/apiv1/iam.go index 9fb2a6ab..a86b08a6 100644 --- a/vendor/cloud.google.com/go/kms/apiv1/iam.go +++ b/vendor/cloud.google.com/go/kms/apiv1/iam.go @@ -16,7 +16,7 @@ package kms import ( "cloud.google.com/go/iam" - kmspb "google.golang.org/genproto/googleapis/cloud/kms/v1" + "cloud.google.com/go/kms/apiv1/kmspb" ) // KeyRingIAM returns a handle to inspect and change permissions of a KeyRing. 
diff --git a/vendor/cloud.google.com/go/kms/apiv1/key_management_client.go b/vendor/cloud.google.com/go/kms/apiv1/key_management_client.go index a3471fd1..56e2ac94 100644 --- a/vendor/cloud.google.com/go/kms/apiv1/key_management_client.go +++ b/vendor/cloud.google.com/go/kms/apiv1/key_management_client.go @@ -20,12 +20,13 @@ import ( "bytes" "context" "fmt" - "io/ioutil" + "io" "math" "net/http" "net/url" "time" + iampb "cloud.google.com/go/iam/apiv1/iampb" kmspb "cloud.google.com/go/kms/apiv1/kmspb" gax "github.com/googleapis/gax-go/v2" "google.golang.org/api/googleapi" @@ -35,7 +36,6 @@ import ( gtransport "google.golang.org/api/transport/grpc" httptransport "google.golang.org/api/transport/http" locationpb "google.golang.org/genproto/googleapis/cloud/location" - iampb "google.golang.org/genproto/googleapis/iam/v1" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" @@ -95,6 +95,7 @@ func defaultKeyManagementGRPCClientOptions() []option.ClientOption { func defaultKeyManagementCallOptions() *KeyManagementCallOptions { return &KeyManagementCallOptions{ ListKeyRings: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.Unavailable, @@ -107,6 +108,7 @@ func defaultKeyManagementCallOptions() *KeyManagementCallOptions { }), }, ListCryptoKeys: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.Unavailable, @@ -119,6 +121,7 @@ func defaultKeyManagementCallOptions() *KeyManagementCallOptions { }), }, ListCryptoKeyVersions: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.Unavailable, @@ -131,6 +134,7 @@ func defaultKeyManagementCallOptions() *KeyManagementCallOptions { }), }, ListImportJobs: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.Unavailable, @@ -143,6 +147,7 @@ func defaultKeyManagementCallOptions() *KeyManagementCallOptions { }), }, GetKeyRing: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.Unavailable, @@ -155,6 +160,7 @@ func defaultKeyManagementCallOptions() *KeyManagementCallOptions { }), }, GetCryptoKey: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.Unavailable, @@ -167,6 +173,7 @@ func defaultKeyManagementCallOptions() *KeyManagementCallOptions { }), }, GetCryptoKeyVersion: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.Unavailable, @@ -179,6 +186,7 @@ func defaultKeyManagementCallOptions() *KeyManagementCallOptions { }), }, GetPublicKey: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.Unavailable, @@ -191,6 +199,7 @@ func defaultKeyManagementCallOptions() *KeyManagementCallOptions { }), }, GetImportJob: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.Unavailable, @@ -203,6 +212,7 @@ func defaultKeyManagementCallOptions() *KeyManagementCallOptions { }), }, CreateKeyRing: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() 
gax.Retryer { return gax.OnCodes([]codes.Code{ codes.Unavailable, @@ -215,6 +225,7 @@ func defaultKeyManagementCallOptions() *KeyManagementCallOptions { }), }, CreateCryptoKey: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.Unavailable, @@ -226,9 +237,14 @@ func defaultKeyManagementCallOptions() *KeyManagementCallOptions { }) }), }, - CreateCryptoKeyVersion: []gax.CallOption{}, - ImportCryptoKeyVersion: []gax.CallOption{}, + CreateCryptoKeyVersion: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + }, + ImportCryptoKeyVersion: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + }, CreateImportJob: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.Unavailable, @@ -241,6 +257,7 @@ func defaultKeyManagementCallOptions() *KeyManagementCallOptions { }), }, UpdateCryptoKey: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.Unavailable, @@ -253,6 +270,7 @@ func defaultKeyManagementCallOptions() *KeyManagementCallOptions { }), }, UpdateCryptoKeyVersion: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.Unavailable, @@ -265,6 +283,7 @@ func defaultKeyManagementCallOptions() *KeyManagementCallOptions { }), }, UpdateCryptoKeyPrimaryVersion: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.Unavailable, @@ -277,6 +296,7 @@ func defaultKeyManagementCallOptions() *KeyManagementCallOptions { }), }, DestroyCryptoKeyVersion: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.Unavailable, @@ -289,6 +309,7 @@ func defaultKeyManagementCallOptions() *KeyManagementCallOptions { }), }, RestoreCryptoKeyVersion: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.Unavailable, @@ -301,6 +322,7 @@ func defaultKeyManagementCallOptions() *KeyManagementCallOptions { }), }, Encrypt: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.Unavailable, @@ -313,6 +335,7 @@ func defaultKeyManagementCallOptions() *KeyManagementCallOptions { }), }, Decrypt: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.Unavailable, @@ -325,6 +348,7 @@ func defaultKeyManagementCallOptions() *KeyManagementCallOptions { }), }, AsymmetricSign: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.Unavailable, @@ -337,6 +361,7 @@ func defaultKeyManagementCallOptions() *KeyManagementCallOptions { }), }, AsymmetricDecrypt: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.Unavailable, @@ -349,6 +374,7 @@ func defaultKeyManagementCallOptions() *KeyManagementCallOptions { }), }, MacSign: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.Unavailable, @@ -361,6 +387,7 @@ func defaultKeyManagementCallOptions() 
*KeyManagementCallOptions { }), }, MacVerify: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.Unavailable, @@ -373,6 +400,7 @@ func defaultKeyManagementCallOptions() *KeyManagementCallOptions { }), }, GenerateRandomBytes: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.Unavailable, @@ -395,6 +423,7 @@ func defaultKeyManagementCallOptions() *KeyManagementCallOptions { func defaultKeyManagementRESTCallOptions() *KeyManagementCallOptions { return &KeyManagementCallOptions{ ListKeyRings: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnHTTPCodes(gax.Backoff{ Initial: 100 * time.Millisecond, @@ -406,6 +435,7 @@ func defaultKeyManagementRESTCallOptions() *KeyManagementCallOptions { }), }, ListCryptoKeys: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnHTTPCodes(gax.Backoff{ Initial: 100 * time.Millisecond, @@ -417,6 +447,7 @@ func defaultKeyManagementRESTCallOptions() *KeyManagementCallOptions { }), }, ListCryptoKeyVersions: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnHTTPCodes(gax.Backoff{ Initial: 100 * time.Millisecond, @@ -428,6 +459,7 @@ func defaultKeyManagementRESTCallOptions() *KeyManagementCallOptions { }), }, ListImportJobs: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnHTTPCodes(gax.Backoff{ Initial: 100 * time.Millisecond, @@ -439,6 +471,7 @@ func defaultKeyManagementRESTCallOptions() *KeyManagementCallOptions { }), }, GetKeyRing: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnHTTPCodes(gax.Backoff{ Initial: 100 * time.Millisecond, @@ -450,6 +483,7 @@ func defaultKeyManagementRESTCallOptions() *KeyManagementCallOptions { }), }, GetCryptoKey: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnHTTPCodes(gax.Backoff{ Initial: 100 * time.Millisecond, @@ -461,6 +495,7 @@ func defaultKeyManagementRESTCallOptions() *KeyManagementCallOptions { }), }, GetCryptoKeyVersion: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnHTTPCodes(gax.Backoff{ Initial: 100 * time.Millisecond, @@ -472,6 +507,7 @@ func defaultKeyManagementRESTCallOptions() *KeyManagementCallOptions { }), }, GetPublicKey: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnHTTPCodes(gax.Backoff{ Initial: 100 * time.Millisecond, @@ -483,6 +519,7 @@ func defaultKeyManagementRESTCallOptions() *KeyManagementCallOptions { }), }, GetImportJob: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnHTTPCodes(gax.Backoff{ Initial: 100 * time.Millisecond, @@ -494,6 +531,7 @@ func defaultKeyManagementRESTCallOptions() *KeyManagementCallOptions { }), }, CreateKeyRing: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnHTTPCodes(gax.Backoff{ Initial: 100 * time.Millisecond, @@ -505,6 +543,7 @@ func defaultKeyManagementRESTCallOptions() *KeyManagementCallOptions { }), }, CreateCryptoKey: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), 
gax.WithRetry(func() gax.Retryer { return gax.OnHTTPCodes(gax.Backoff{ Initial: 100 * time.Millisecond, @@ -515,9 +554,14 @@ func defaultKeyManagementRESTCallOptions() *KeyManagementCallOptions { http.StatusGatewayTimeout) }), }, - CreateCryptoKeyVersion: []gax.CallOption{}, - ImportCryptoKeyVersion: []gax.CallOption{}, + CreateCryptoKeyVersion: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + }, + ImportCryptoKeyVersion: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + }, CreateImportJob: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnHTTPCodes(gax.Backoff{ Initial: 100 * time.Millisecond, @@ -529,6 +573,7 @@ func defaultKeyManagementRESTCallOptions() *KeyManagementCallOptions { }), }, UpdateCryptoKey: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnHTTPCodes(gax.Backoff{ Initial: 100 * time.Millisecond, @@ -540,6 +585,7 @@ func defaultKeyManagementRESTCallOptions() *KeyManagementCallOptions { }), }, UpdateCryptoKeyVersion: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnHTTPCodes(gax.Backoff{ Initial: 100 * time.Millisecond, @@ -551,6 +597,7 @@ func defaultKeyManagementRESTCallOptions() *KeyManagementCallOptions { }), }, UpdateCryptoKeyPrimaryVersion: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnHTTPCodes(gax.Backoff{ Initial: 100 * time.Millisecond, @@ -562,6 +609,7 @@ func defaultKeyManagementRESTCallOptions() *KeyManagementCallOptions { }), }, DestroyCryptoKeyVersion: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnHTTPCodes(gax.Backoff{ Initial: 100 * time.Millisecond, @@ -573,6 +621,7 @@ func defaultKeyManagementRESTCallOptions() *KeyManagementCallOptions { }), }, RestoreCryptoKeyVersion: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnHTTPCodes(gax.Backoff{ Initial: 100 * time.Millisecond, @@ -584,6 +633,7 @@ func defaultKeyManagementRESTCallOptions() *KeyManagementCallOptions { }), }, Encrypt: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnHTTPCodes(gax.Backoff{ Initial: 100 * time.Millisecond, @@ -595,6 +645,7 @@ func defaultKeyManagementRESTCallOptions() *KeyManagementCallOptions { }), }, Decrypt: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnHTTPCodes(gax.Backoff{ Initial: 100 * time.Millisecond, @@ -606,6 +657,7 @@ func defaultKeyManagementRESTCallOptions() *KeyManagementCallOptions { }), }, AsymmetricSign: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnHTTPCodes(gax.Backoff{ Initial: 100 * time.Millisecond, @@ -617,6 +669,7 @@ func defaultKeyManagementRESTCallOptions() *KeyManagementCallOptions { }), }, AsymmetricDecrypt: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnHTTPCodes(gax.Backoff{ Initial: 100 * time.Millisecond, @@ -628,6 +681,7 @@ func defaultKeyManagementRESTCallOptions() *KeyManagementCallOptions { }), }, MacSign: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnHTTPCodes(gax.Backoff{ Initial: 100 * time.Millisecond, @@ -639,6 +693,7 @@ func 
defaultKeyManagementRESTCallOptions() *KeyManagementCallOptions { }), }, MacVerify: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnHTTPCodes(gax.Backoff{ Initial: 100 * time.Millisecond, @@ -650,6 +705,7 @@ func defaultKeyManagementRESTCallOptions() *KeyManagementCallOptions { }), }, GenerateRandomBytes: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { return gax.OnHTTPCodes(gax.Backoff{ Initial: 100 * time.Millisecond, @@ -1022,9 +1078,6 @@ type keyManagementGRPCClient struct { // Connection pool of gRPC connections to the service. connPool gtransport.ConnPool - // flag to opt out of default deadlines via GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE - disableDeadlines bool - // Points back to the CallOptions field of the containing KeyManagementClient CallOptions **KeyManagementCallOptions @@ -1067,11 +1120,6 @@ func NewKeyManagementClient(ctx context.Context, opts ...option.ClientOption) (* clientOpts = append(clientOpts, hookOpts...) } - disableDeadlines, err := checkDisableDeadlines() - if err != nil { - return nil, err - } - connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) if err != nil { return nil, err @@ -1080,7 +1128,6 @@ func NewKeyManagementClient(ctx context.Context, opts ...option.ClientOption) (* c := &keyManagementGRPCClient{ connPool: connPool, - disableDeadlines: disableDeadlines, keyManagementClient: kmspb.NewKeyManagementServiceClient(connPool), CallOptions: &client.CallOptions, iamPolicyClient: iampb.NewIAMPolicyClient(connPool), @@ -1105,7 +1152,7 @@ func (c *keyManagementGRPCClient) Connection() *grpc.ClientConn { // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. func (c *keyManagementGRPCClient) setGoogleClientInfo(keyval ...string) { - kv := append([]string{"gl-go", versionGo()}, keyval...) + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) } @@ -1179,7 +1226,7 @@ func defaultKeyManagementRESTClientOptions() []option.ClientOption { // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. func (c *keyManagementRESTClient) setGoogleClientInfo(keyval ...string) { - kv := append([]string{"gl-go", versionGo()}, keyval...) + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) 
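Reviewer note: every KeyManagement default call option now carries a 60s gax.WithTimeout (including CreateCryptoKeyVersion and ImportCryptoKeyVersion, which previously had none). Because the CallOptions struct is exported, callers can still retune these defaults per client; a hedged sketch under that assumption:

```go
package main

import (
	"context"
	"log"
	"time"

	kms "cloud.google.com/go/kms/apiv1"
	gax "github.com/googleapis/gax-go/v2"
)

func main() {
	ctx := context.Background()

	client, err := kms.NewKeyManagementClient(ctx)
	if err != nil {
		log.Fatalf("NewKeyManagementClient: %v", err)
	}
	defer client.Close()

	// The defaults now ship with gax.WithTimeout(60s); appending another
	// WithTimeout tightens Encrypt for every call made through this client,
	// since later options take precedence during resolution.
	client.CallOptions.Encrypt = append(client.CallOptions.Encrypt,
		gax.WithTimeout(15*time.Second))

	// Subsequent client.Encrypt calls use the 15s timeout unless a per-call
	// option or a shorter context deadline says otherwise.
}
```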
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) } @@ -1379,11 +1426,6 @@ func (c *keyManagementGRPCClient) ListImportJobs(ctx context.Context, req *kmspb } func (c *keyManagementGRPCClient) GetKeyRing(ctx context.Context, req *kmspb.GetKeyRingRequest, opts ...gax.CallOption) (*kmspb.KeyRing, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) - defer cancel() - ctx = cctx - } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) ctx = insertMetadata(ctx, c.xGoogMetadata, md) @@ -1401,11 +1443,6 @@ func (c *keyManagementGRPCClient) GetKeyRing(ctx context.Context, req *kmspb.Get } func (c *keyManagementGRPCClient) GetCryptoKey(ctx context.Context, req *kmspb.GetCryptoKeyRequest, opts ...gax.CallOption) (*kmspb.CryptoKey, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) - defer cancel() - ctx = cctx - } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) ctx = insertMetadata(ctx, c.xGoogMetadata, md) @@ -1423,11 +1460,6 @@ func (c *keyManagementGRPCClient) GetCryptoKey(ctx context.Context, req *kmspb.G } func (c *keyManagementGRPCClient) GetCryptoKeyVersion(ctx context.Context, req *kmspb.GetCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) - defer cancel() - ctx = cctx - } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) ctx = insertMetadata(ctx, c.xGoogMetadata, md) @@ -1445,11 +1477,6 @@ func (c *keyManagementGRPCClient) GetCryptoKeyVersion(ctx context.Context, req * } func (c *keyManagementGRPCClient) GetPublicKey(ctx context.Context, req *kmspb.GetPublicKeyRequest, opts ...gax.CallOption) (*kmspb.PublicKey, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) - defer cancel() - ctx = cctx - } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) ctx = insertMetadata(ctx, c.xGoogMetadata, md) @@ -1467,11 +1494,6 @@ func (c *keyManagementGRPCClient) GetPublicKey(ctx context.Context, req *kmspb.G } func (c *keyManagementGRPCClient) GetImportJob(ctx context.Context, req *kmspb.GetImportJobRequest, opts ...gax.CallOption) (*kmspb.ImportJob, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) - defer cancel() - ctx = cctx - } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) ctx = insertMetadata(ctx, c.xGoogMetadata, md) @@ -1489,11 +1511,6 @@ func (c *keyManagementGRPCClient) GetImportJob(ctx context.Context, req *kmspb.G } func (c *keyManagementGRPCClient) CreateKeyRing(ctx context.Context, req *kmspb.CreateKeyRingRequest, opts ...gax.CallOption) (*kmspb.KeyRing, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) - defer cancel() - ctx = cctx - } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", 
url.QueryEscape(req.GetParent()))) ctx = insertMetadata(ctx, c.xGoogMetadata, md) @@ -1511,11 +1528,6 @@ func (c *keyManagementGRPCClient) CreateKeyRing(ctx context.Context, req *kmspb. } func (c *keyManagementGRPCClient) CreateCryptoKey(ctx context.Context, req *kmspb.CreateCryptoKeyRequest, opts ...gax.CallOption) (*kmspb.CryptoKey, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) - defer cancel() - ctx = cctx - } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) ctx = insertMetadata(ctx, c.xGoogMetadata, md) @@ -1533,11 +1545,6 @@ func (c *keyManagementGRPCClient) CreateCryptoKey(ctx context.Context, req *kmsp } func (c *keyManagementGRPCClient) CreateCryptoKeyVersion(ctx context.Context, req *kmspb.CreateCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) - defer cancel() - ctx = cctx - } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) ctx = insertMetadata(ctx, c.xGoogMetadata, md) @@ -1555,11 +1562,6 @@ func (c *keyManagementGRPCClient) CreateCryptoKeyVersion(ctx context.Context, re } func (c *keyManagementGRPCClient) ImportCryptoKeyVersion(ctx context.Context, req *kmspb.ImportCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) - defer cancel() - ctx = cctx - } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) ctx = insertMetadata(ctx, c.xGoogMetadata, md) @@ -1577,11 +1579,6 @@ func (c *keyManagementGRPCClient) ImportCryptoKeyVersion(ctx context.Context, re } func (c *keyManagementGRPCClient) CreateImportJob(ctx context.Context, req *kmspb.CreateImportJobRequest, opts ...gax.CallOption) (*kmspb.ImportJob, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) - defer cancel() - ctx = cctx - } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) ctx = insertMetadata(ctx, c.xGoogMetadata, md) @@ -1599,11 +1596,6 @@ func (c *keyManagementGRPCClient) CreateImportJob(ctx context.Context, req *kmsp } func (c *keyManagementGRPCClient) UpdateCryptoKey(ctx context.Context, req *kmspb.UpdateCryptoKeyRequest, opts ...gax.CallOption) (*kmspb.CryptoKey, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) - defer cancel() - ctx = cctx - } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "crypto_key.name", url.QueryEscape(req.GetCryptoKey().GetName()))) ctx = insertMetadata(ctx, c.xGoogMetadata, md) @@ -1621,11 +1613,6 @@ func (c *keyManagementGRPCClient) UpdateCryptoKey(ctx context.Context, req *kmsp } func (c *keyManagementGRPCClient) UpdateCryptoKeyVersion(ctx context.Context, req *kmspb.UpdateCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) - defer cancel() - ctx = cctx - } md := metadata.Pairs("x-goog-request-params", 
fmt.Sprintf("%s=%v", "crypto_key_version.name", url.QueryEscape(req.GetCryptoKeyVersion().GetName()))) ctx = insertMetadata(ctx, c.xGoogMetadata, md) @@ -1643,11 +1630,6 @@ func (c *keyManagementGRPCClient) UpdateCryptoKeyVersion(ctx context.Context, re } func (c *keyManagementGRPCClient) UpdateCryptoKeyPrimaryVersion(ctx context.Context, req *kmspb.UpdateCryptoKeyPrimaryVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKey, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) - defer cancel() - ctx = cctx - } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) ctx = insertMetadata(ctx, c.xGoogMetadata, md) @@ -1665,11 +1647,6 @@ func (c *keyManagementGRPCClient) UpdateCryptoKeyPrimaryVersion(ctx context.Cont } func (c *keyManagementGRPCClient) DestroyCryptoKeyVersion(ctx context.Context, req *kmspb.DestroyCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) - defer cancel() - ctx = cctx - } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) ctx = insertMetadata(ctx, c.xGoogMetadata, md) @@ -1687,11 +1664,6 @@ func (c *keyManagementGRPCClient) DestroyCryptoKeyVersion(ctx context.Context, r } func (c *keyManagementGRPCClient) RestoreCryptoKeyVersion(ctx context.Context, req *kmspb.RestoreCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) - defer cancel() - ctx = cctx - } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) ctx = insertMetadata(ctx, c.xGoogMetadata, md) @@ -1709,11 +1681,6 @@ func (c *keyManagementGRPCClient) RestoreCryptoKeyVersion(ctx context.Context, r } func (c *keyManagementGRPCClient) Encrypt(ctx context.Context, req *kmspb.EncryptRequest, opts ...gax.CallOption) (*kmspb.EncryptResponse, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) - defer cancel() - ctx = cctx - } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) ctx = insertMetadata(ctx, c.xGoogMetadata, md) @@ -1731,11 +1698,6 @@ func (c *keyManagementGRPCClient) Encrypt(ctx context.Context, req *kmspb.Encryp } func (c *keyManagementGRPCClient) Decrypt(ctx context.Context, req *kmspb.DecryptRequest, opts ...gax.CallOption) (*kmspb.DecryptResponse, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) - defer cancel() - ctx = cctx - } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) ctx = insertMetadata(ctx, c.xGoogMetadata, md) @@ -1753,11 +1715,6 @@ func (c *keyManagementGRPCClient) Decrypt(ctx context.Context, req *kmspb.Decryp } func (c *keyManagementGRPCClient) AsymmetricSign(ctx context.Context, req *kmspb.AsymmetricSignRequest, opts ...gax.CallOption) (*kmspb.AsymmetricSignResponse, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) - defer cancel() - ctx = cctx - } md := 
metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) ctx = insertMetadata(ctx, c.xGoogMetadata, md) @@ -1775,11 +1732,6 @@ func (c *keyManagementGRPCClient) AsymmetricSign(ctx context.Context, req *kmspb } func (c *keyManagementGRPCClient) AsymmetricDecrypt(ctx context.Context, req *kmspb.AsymmetricDecryptRequest, opts ...gax.CallOption) (*kmspb.AsymmetricDecryptResponse, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) - defer cancel() - ctx = cctx - } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) ctx = insertMetadata(ctx, c.xGoogMetadata, md) @@ -1797,11 +1749,6 @@ func (c *keyManagementGRPCClient) AsymmetricDecrypt(ctx context.Context, req *km } func (c *keyManagementGRPCClient) MacSign(ctx context.Context, req *kmspb.MacSignRequest, opts ...gax.CallOption) (*kmspb.MacSignResponse, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) - defer cancel() - ctx = cctx - } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) ctx = insertMetadata(ctx, c.xGoogMetadata, md) @@ -1819,11 +1766,6 @@ func (c *keyManagementGRPCClient) MacSign(ctx context.Context, req *kmspb.MacSig } func (c *keyManagementGRPCClient) MacVerify(ctx context.Context, req *kmspb.MacVerifyRequest, opts ...gax.CallOption) (*kmspb.MacVerifyResponse, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) - defer cancel() - ctx = cctx - } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) ctx = insertMetadata(ctx, c.xGoogMetadata, md) @@ -1841,11 +1783,6 @@ func (c *keyManagementGRPCClient) MacVerify(ctx context.Context, req *kmspb.MacV } func (c *keyManagementGRPCClient) GenerateRandomBytes(ctx context.Context, req *kmspb.GenerateRandomBytesRequest, opts ...gax.CallOption) (*kmspb.GenerateRandomBytesResponse, error) { - if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { - cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) - defer cancel() - ctx = cctx - } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "location", url.QueryEscape(req.GetLocation()))) ctx = insertMetadata(ctx, c.xGoogMetadata, md) @@ -2035,13 +1972,13 @@ func (c *keyManagementRESTClient) ListKeyRings(ctx context.Context, req *kmspb.L return err } - buf, err := ioutil.ReadAll(httpRsp.Body) + buf, err := io.ReadAll(httpRsp.Body) if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { - return maybeUnknownEnum(err) + return err } return nil @@ -2132,13 +2069,13 @@ func (c *keyManagementRESTClient) ListCryptoKeys(ctx context.Context, req *kmspb return err } - buf, err := ioutil.ReadAll(httpRsp.Body) + buf, err := io.ReadAll(httpRsp.Body) if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { - return maybeUnknownEnum(err) + return err } return nil @@ -2229,13 +2166,13 @@ func (c *keyManagementRESTClient) ListCryptoKeyVersions(ctx context.Context, req return err } - buf, err := ioutil.ReadAll(httpRsp.Body) + buf, err := io.ReadAll(httpRsp.Body) if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { - return maybeUnknownEnum(err) + return err } return nil @@ -2323,13 +2260,13 @@ func (c 
*keyManagementRESTClient) ListImportJobs(ctx context.Context, req *kmspb return err } - buf, err := ioutil.ReadAll(httpRsp.Body) + buf, err := io.ReadAll(httpRsp.Body) if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { - return maybeUnknownEnum(err) + return err } return nil @@ -2398,13 +2335,13 @@ func (c *keyManagementRESTClient) GetKeyRing(ctx context.Context, req *kmspb.Get return err } - buf, err := ioutil.ReadAll(httpRsp.Body) + buf, err := io.ReadAll(httpRsp.Body) if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { - return maybeUnknownEnum(err) + return err } return nil @@ -2458,13 +2395,13 @@ func (c *keyManagementRESTClient) GetCryptoKey(ctx context.Context, req *kmspb.G return err } - buf, err := ioutil.ReadAll(httpRsp.Body) + buf, err := io.ReadAll(httpRsp.Body) if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { - return maybeUnknownEnum(err) + return err } return nil @@ -2517,13 +2454,13 @@ func (c *keyManagementRESTClient) GetCryptoKeyVersion(ctx context.Context, req * return err } - buf, err := ioutil.ReadAll(httpRsp.Body) + buf, err := io.ReadAll(httpRsp.Body) if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { - return maybeUnknownEnum(err) + return err } return nil @@ -2580,13 +2517,13 @@ func (c *keyManagementRESTClient) GetPublicKey(ctx context.Context, req *kmspb.G return err } - buf, err := ioutil.ReadAll(httpRsp.Body) + buf, err := io.ReadAll(httpRsp.Body) if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { - return maybeUnknownEnum(err) + return err } return nil @@ -2638,13 +2575,13 @@ func (c *keyManagementRESTClient) GetImportJob(ctx context.Context, req *kmspb.G return err } - buf, err := ioutil.ReadAll(httpRsp.Body) + buf, err := io.ReadAll(httpRsp.Body) if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { - return maybeUnknownEnum(err) + return err } return nil @@ -2705,13 +2642,13 @@ func (c *keyManagementRESTClient) CreateKeyRing(ctx context.Context, req *kmspb. 
return err } - buf, err := ioutil.ReadAll(httpRsp.Body) + buf, err := io.ReadAll(httpRsp.Body) if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { - return maybeUnknownEnum(err) + return err } return nil @@ -2779,13 +2716,13 @@ func (c *keyManagementRESTClient) CreateCryptoKey(ctx context.Context, req *kmsp return err } - buf, err := ioutil.ReadAll(httpRsp.Body) + buf, err := io.ReadAll(httpRsp.Body) if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { - return maybeUnknownEnum(err) + return err } return nil @@ -2849,13 +2786,13 @@ func (c *keyManagementRESTClient) CreateCryptoKeyVersion(ctx context.Context, re return err } - buf, err := ioutil.ReadAll(httpRsp.Body) + buf, err := io.ReadAll(httpRsp.Body) if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { - return maybeUnknownEnum(err) + return err } return nil @@ -2920,13 +2857,13 @@ func (c *keyManagementRESTClient) ImportCryptoKeyVersion(ctx context.Context, re return err } - buf, err := ioutil.ReadAll(httpRsp.Body) + buf, err := io.ReadAll(httpRsp.Body) if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { - return maybeUnknownEnum(err) + return err } return nil @@ -2990,13 +2927,13 @@ func (c *keyManagementRESTClient) CreateImportJob(ctx context.Context, req *kmsp return err } - buf, err := ioutil.ReadAll(httpRsp.Body) + buf, err := io.ReadAll(httpRsp.Body) if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { - return maybeUnknownEnum(err) + return err } return nil @@ -3029,7 +2966,7 @@ func (c *keyManagementRESTClient) UpdateCryptoKey(ctx context.Context, req *kmsp if err != nil { return nil, err } - params.Add("updateMask", string(updateMask)) + params.Add("updateMask", string(updateMask[1:len(updateMask)-1])) } baseUrl.RawQuery = params.Encode() @@ -3062,13 +2999,13 @@ func (c *keyManagementRESTClient) UpdateCryptoKey(ctx context.Context, req *kmsp return err } - buf, err := ioutil.ReadAll(httpRsp.Body) + buf, err := io.ReadAll(httpRsp.Body) if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { - return maybeUnknownEnum(err) + return err } return nil @@ -3112,7 +3049,7 @@ func (c *keyManagementRESTClient) UpdateCryptoKeyVersion(ctx context.Context, re if err != nil { return nil, err } - params.Add("updateMask", string(updateMask)) + params.Add("updateMask", string(updateMask[1:len(updateMask)-1])) } baseUrl.RawQuery = params.Encode() @@ -3145,13 +3082,13 @@ func (c *keyManagementRESTClient) UpdateCryptoKeyVersion(ctx context.Context, re return err } - buf, err := ioutil.ReadAll(httpRsp.Body) + buf, err := io.ReadAll(httpRsp.Body) if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { - return maybeUnknownEnum(err) + return err } return nil @@ -3214,13 +3151,13 @@ func (c *keyManagementRESTClient) UpdateCryptoKeyPrimaryVersion(ctx context.Cont return err } - buf, err := ioutil.ReadAll(httpRsp.Body) + buf, err := io.ReadAll(httpRsp.Body) if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { - return maybeUnknownEnum(err) + return err } return nil @@ -3298,13 +3235,13 @@ func (c *keyManagementRESTClient) DestroyCryptoKeyVersion(ctx context.Context, r return err } - buf, err := ioutil.ReadAll(httpRsp.Body) + buf, err := io.ReadAll(httpRsp.Body) if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { - return maybeUnknownEnum(err) + return err } return nil @@ -3370,13 +3307,13 @@ func (c *keyManagementRESTClient) 
RestoreCryptoKeyVersion(ctx context.Context, r return err } - buf, err := ioutil.ReadAll(httpRsp.Body) + buf, err := io.ReadAll(httpRsp.Body) if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { - return maybeUnknownEnum(err) + return err } return nil @@ -3437,13 +3374,13 @@ func (c *keyManagementRESTClient) Encrypt(ctx context.Context, req *kmspb.Encryp return err } - buf, err := ioutil.ReadAll(httpRsp.Body) + buf, err := io.ReadAll(httpRsp.Body) if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { - return maybeUnknownEnum(err) + return err } return nil @@ -3504,13 +3441,13 @@ func (c *keyManagementRESTClient) Decrypt(ctx context.Context, req *kmspb.Decryp return err } - buf, err := ioutil.ReadAll(httpRsp.Body) + buf, err := io.ReadAll(httpRsp.Body) if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { - return maybeUnknownEnum(err) + return err } return nil @@ -3572,13 +3509,13 @@ func (c *keyManagementRESTClient) AsymmetricSign(ctx context.Context, req *kmspb return err } - buf, err := ioutil.ReadAll(httpRsp.Body) + buf, err := io.ReadAll(httpRsp.Body) if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { - return maybeUnknownEnum(err) + return err } return nil @@ -3640,13 +3577,13 @@ func (c *keyManagementRESTClient) AsymmetricDecrypt(ctx context.Context, req *km return err } - buf, err := ioutil.ReadAll(httpRsp.Body) + buf, err := io.ReadAll(httpRsp.Body) if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { - return maybeUnknownEnum(err) + return err } return nil @@ -3706,13 +3643,13 @@ func (c *keyManagementRESTClient) MacSign(ctx context.Context, req *kmspb.MacSig return err } - buf, err := ioutil.ReadAll(httpRsp.Body) + buf, err := io.ReadAll(httpRsp.Body) if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { - return maybeUnknownEnum(err) + return err } return nil @@ -3773,13 +3710,13 @@ func (c *keyManagementRESTClient) MacVerify(ctx context.Context, req *kmspb.MacV return err } - buf, err := ioutil.ReadAll(httpRsp.Body) + buf, err := io.ReadAll(httpRsp.Body) if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { - return maybeUnknownEnum(err) + return err } return nil @@ -3838,13 +3775,13 @@ func (c *keyManagementRESTClient) GenerateRandomBytes(ctx context.Context, req * return err } - buf, err := ioutil.ReadAll(httpRsp.Body) + buf, err := io.ReadAll(httpRsp.Body) if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { - return maybeUnknownEnum(err) + return err } return nil @@ -3896,13 +3833,13 @@ func (c *keyManagementRESTClient) GetLocation(ctx context.Context, req *location return err } - buf, err := ioutil.ReadAll(httpRsp.Body) + buf, err := io.ReadAll(httpRsp.Body) if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { - return maybeUnknownEnum(err) + return err } return nil @@ -3970,13 +3907,13 @@ func (c *keyManagementRESTClient) ListLocations(ctx context.Context, req *locati return err } - buf, err := ioutil.ReadAll(httpRsp.Body) + buf, err := io.ReadAll(httpRsp.Body) if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { - return maybeUnknownEnum(err) + return err } return nil @@ -4049,13 +3986,13 @@ func (c *keyManagementRESTClient) GetIamPolicy(ctx context.Context, req *iampb.G return err } - buf, err := ioutil.ReadAll(httpRsp.Body) + buf, err := io.ReadAll(httpRsp.Body) if err != nil { return err } if err := unm.Unmarshal(buf, 
resp); err != nil { - return maybeUnknownEnum(err) + return err } return nil @@ -4117,13 +4054,13 @@ func (c *keyManagementRESTClient) SetIamPolicy(ctx context.Context, req *iampb.S return err } - buf, err := ioutil.ReadAll(httpRsp.Body) + buf, err := io.ReadAll(httpRsp.Body) if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { - return maybeUnknownEnum(err) + return err } return nil @@ -4187,13 +4124,13 @@ func (c *keyManagementRESTClient) TestIamPermissions(ctx context.Context, req *i return err } - buf, err := ioutil.ReadAll(httpRsp.Body) + buf, err := io.ReadAll(httpRsp.Body) if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { - return maybeUnknownEnum(err) + return err } return nil diff --git a/vendor/cloud.google.com/go/kms/apiv1/kmspb/ekm_service.pb.go b/vendor/cloud.google.com/go/kms/apiv1/kmspb/ekm_service.pb.go index 2cf0cd4f..c0fd16ed 100644 --- a/vendor/cloud.google.com/go/kms/apiv1/kmspb/ekm_service.pb.go +++ b/vendor/cloud.google.com/go/kms/apiv1/kmspb/ekm_service.pb.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc-gen-go v1.30.0 +// protoc v4.23.2 // source: google/cloud/kms/v1/ekm_service.proto package kmspb @@ -42,6 +42,85 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +// [KeyManagementMode][google.cloud.kms.v1.EkmConnection.KeyManagementMode] +// describes who can perform control plane cryptographic operations using this +// [EkmConnection][google.cloud.kms.v1.EkmConnection]. +type EkmConnection_KeyManagementMode int32 + +const ( + // Not specified. + EkmConnection_KEY_MANAGEMENT_MODE_UNSPECIFIED EkmConnection_KeyManagementMode = 0 + // EKM-side key management operations on + // [CryptoKeys][google.cloud.kms.v1.CryptoKey] created with this + // [EkmConnection][google.cloud.kms.v1.EkmConnection] must be initiated from + // the EKM directly and cannot be performed from Cloud KMS. This means that: + // * When creating a + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] associated with + // this + // [EkmConnection][google.cloud.kms.v1.EkmConnection], the caller must + // supply the key path of pre-existing external key material that will be + // linked to the [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. + // * Destruction of external key material cannot be requested via the + // Cloud KMS API and must be performed directly in the EKM. + // * Automatic rotation of key material is not supported. + EkmConnection_MANUAL EkmConnection_KeyManagementMode = 1 + // All [CryptoKeys][google.cloud.kms.v1.CryptoKey] created with this + // [EkmConnection][google.cloud.kms.v1.EkmConnection] use EKM-side key + // management operations initiated from Cloud KMS. This means that: + // * When a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] + // associated with this [EkmConnection][google.cloud.kms.v1.EkmConnection] + // is + // created, the EKM automatically generates new key material and a new + // key path. The caller cannot supply the key path of pre-existing + // external key material. 
+ // * Destruction of external key material associated with this + // [EkmConnection][google.cloud.kms.v1.EkmConnection] can be requested by + // calling [DestroyCryptoKeyVersion][EkmService.DestroyCryptoKeyVersion]. + // * Automatic rotation of key material is supported. + EkmConnection_CLOUD_KMS EkmConnection_KeyManagementMode = 2 +) + +// Enum value maps for EkmConnection_KeyManagementMode. +var ( + EkmConnection_KeyManagementMode_name = map[int32]string{ + 0: "KEY_MANAGEMENT_MODE_UNSPECIFIED", + 1: "MANUAL", + 2: "CLOUD_KMS", + } + EkmConnection_KeyManagementMode_value = map[string]int32{ + "KEY_MANAGEMENT_MODE_UNSPECIFIED": 0, + "MANUAL": 1, + "CLOUD_KMS": 2, + } +) + +func (x EkmConnection_KeyManagementMode) Enum() *EkmConnection_KeyManagementMode { + p := new(EkmConnection_KeyManagementMode) + *p = x + return p +} + +func (x EkmConnection_KeyManagementMode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (EkmConnection_KeyManagementMode) Descriptor() protoreflect.EnumDescriptor { + return file_google_cloud_kms_v1_ekm_service_proto_enumTypes[0].Descriptor() +} + +func (EkmConnection_KeyManagementMode) Type() protoreflect.EnumType { + return &file_google_cloud_kms_v1_ekm_service_proto_enumTypes[0] +} + +func (x EkmConnection_KeyManagementMode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use EkmConnection_KeyManagementMode.Descriptor instead. +func (EkmConnection_KeyManagementMode) EnumDescriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_ekm_service_proto_rawDescGZIP(), []int{8, 0} +} + // Request message for // [EkmService.ListEkmConnections][google.cloud.kms.v1.EkmService.ListEkmConnections]. type ListEkmConnectionsRequest struct { @@ -397,6 +476,116 @@ func (x *UpdateEkmConnectionRequest) GetUpdateMask() *fieldmaskpb.FieldMask { return nil } +// Request message for +// [EkmService.GetEkmConfig][google.cloud.kms.v1.EkmService.GetEkmConfig]. +type GetEkmConfigRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The [name][google.cloud.kms.v1.EkmConfig.name] of the + // [EkmConfig][google.cloud.kms.v1.EkmConfig] to get. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetEkmConfigRequest) Reset() { + *x = GetEkmConfigRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetEkmConfigRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetEkmConfigRequest) ProtoMessage() {} + +func (x *GetEkmConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetEkmConfigRequest.ProtoReflect.Descriptor instead. +func (*GetEkmConfigRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_ekm_service_proto_rawDescGZIP(), []int{5} +} + +func (x *GetEkmConfigRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// Request message for +// [EkmService.UpdateEkmConfig][google.cloud.kms.v1.EkmService.UpdateEkmConfig]. 
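// The UpdateEkmConfigRequest documented by the comment above is defined
// immediately below. As a consumer-side sketch of the KeyManagementMode enum
// and the GetEkmConfigRequest message added in this hunk: the package, helper
// names, and resource-name formatting are illustrative, while the kmspb types,
// constants, and the projects/*/locations/*/ekmConfig singleton name come from
// this file.

package ekmusage

import (
	"fmt"

	"cloud.google.com/go/kms/apiv1/kmspb"
)

// describeMode summarizes, per the enum documentation above, where key
// management operations are initiated for each EkmConnection mode.
func describeMode(mode kmspb.EkmConnection_KeyManagementMode) string {
	switch mode {
	case kmspb.EkmConnection_MANUAL:
		return "key material is created and destroyed directly in the EKM; Cloud KMS links to pre-existing key paths"
	case kmspb.EkmConnection_CLOUD_KMS:
		return "Cloud KMS initiates EKM-side key creation, destruction, and rotation"
	default:
		return fmt.Sprintf("key management mode %s is not specified", mode)
	}
}

// getEkmConfigRequest targets the per-location singleton EkmConfig resource.
func getEkmConfigRequest(project, location string) *kmspb.GetEkmConfigRequest {
	return &kmspb.GetEkmConfigRequest{
		Name: fmt.Sprintf("projects/%s/locations/%s/ekmConfig", project, location),
	}
}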
+type UpdateEkmConfigRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. [EkmConfig][google.cloud.kms.v1.EkmConfig] with updated values. + EkmConfig *EkmConfig `protobuf:"bytes,1,opt,name=ekm_config,json=ekmConfig,proto3" json:"ekm_config,omitempty"` + // Required. List of fields to be updated in this request. + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` +} + +func (x *UpdateEkmConfigRequest) Reset() { + *x = UpdateEkmConfigRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateEkmConfigRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateEkmConfigRequest) ProtoMessage() {} + +func (x *UpdateEkmConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateEkmConfigRequest.ProtoReflect.Descriptor instead. +func (*UpdateEkmConfigRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_ekm_service_proto_rawDescGZIP(), []int{6} +} + +func (x *UpdateEkmConfigRequest) GetEkmConfig() *EkmConfig { + if x != nil { + return x.EkmConfig + } + return nil +} + +func (x *UpdateEkmConfigRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + // A [Certificate][google.cloud.kms.v1.Certificate] represents an X.509 // certificate used to authenticate HTTPS connections to EKM replicas. type Certificate struct { @@ -434,7 +623,7 @@ type Certificate struct { func (x *Certificate) Reset() { *x = Certificate{} if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[5] + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -447,7 +636,7 @@ func (x *Certificate) String() string { func (*Certificate) ProtoMessage() {} func (x *Certificate) ProtoReflect() protoreflect.Message { - mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[5] + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -460,7 +649,7 @@ func (x *Certificate) ProtoReflect() protoreflect.Message { // Deprecated: Use Certificate.ProtoReflect.Descriptor instead. func (*Certificate) Descriptor() ([]byte, []int) { - return file_google_cloud_kms_v1_ekm_service_proto_rawDescGZIP(), []int{5} + return file_google_cloud_kms_v1_ekm_service_proto_rawDescGZIP(), []int{7} } func (x *Certificate) GetRawDer() []byte { @@ -556,12 +745,22 @@ type EkmConnection struct { // Optional. Etag of the currently stored // [EkmConnection][google.cloud.kms.v1.EkmConnection]. Etag string `protobuf:"bytes,5,opt,name=etag,proto3" json:"etag,omitempty"` + // Optional. Describes who can perform control plane operations on the EKM. If + // unset, this defaults to + // [MANUAL][google.cloud.kms.v1.EkmConnection.KeyManagementMode.MANUAL]. 
+ KeyManagementMode EkmConnection_KeyManagementMode `protobuf:"varint,6,opt,name=key_management_mode,json=keyManagementMode,proto3,enum=google.cloud.kms.v1.EkmConnection_KeyManagementMode" json:"key_management_mode,omitempty"` + // Optional. Identifies the EKM Crypto Space that this + // [EkmConnection][google.cloud.kms.v1.EkmConnection] maps to. Note: This + // field is required if + // [KeyManagementMode][google.cloud.kms.v1.EkmConnection.KeyManagementMode] is + // [CLOUD_KMS][google.cloud.kms.v1.EkmConnection.KeyManagementMode.CLOUD_KMS]. + CryptoSpacePath string `protobuf:"bytes,7,opt,name=crypto_space_path,json=cryptoSpacePath,proto3" json:"crypto_space_path,omitempty"` } func (x *EkmConnection) Reset() { *x = EkmConnection{} if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[6] + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -574,7 +773,7 @@ func (x *EkmConnection) String() string { func (*EkmConnection) ProtoMessage() {} func (x *EkmConnection) ProtoReflect() protoreflect.Message { - mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[6] + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -587,7 +786,7 @@ func (x *EkmConnection) ProtoReflect() protoreflect.Message { // Deprecated: Use EkmConnection.ProtoReflect.Descriptor instead. func (*EkmConnection) Descriptor() ([]byte, []int) { - return file_google_cloud_kms_v1_ekm_service_proto_rawDescGZIP(), []int{6} + return file_google_cloud_kms_v1_ekm_service_proto_rawDescGZIP(), []int{8} } func (x *EkmConnection) GetName() string { @@ -618,6 +817,179 @@ func (x *EkmConnection) GetEtag() string { return "" } +func (x *EkmConnection) GetKeyManagementMode() EkmConnection_KeyManagementMode { + if x != nil { + return x.KeyManagementMode + } + return EkmConnection_KEY_MANAGEMENT_MODE_UNSPECIFIED +} + +func (x *EkmConnection) GetCryptoSpacePath() string { + if x != nil { + return x.CryptoSpacePath + } + return "" +} + +// An [EkmConfig][google.cloud.kms.v1.EkmConfig] is a singleton resource that +// represents configuration parameters that apply to all +// [CryptoKeys][google.cloud.kms.v1.CryptoKey] and +// [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion] with a +// [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] of +// [EXTERNAL_VPC][CryptoKeyVersion.ProtectionLevel.EXTERNAL_VPC] in a given +// project and location. +type EkmConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. The resource name for the + // [EkmConfig][google.cloud.kms.v1.EkmConfig] in the format + // `projects/*/locations/*/ekmConfig`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Optional. Resource name of the default + // [EkmConnection][google.cloud.kms.v1.EkmConnection]. Setting this field to + // the empty string removes the default. 
+ DefaultEkmConnection string `protobuf:"bytes,2,opt,name=default_ekm_connection,json=defaultEkmConnection,proto3" json:"default_ekm_connection,omitempty"` +} + +func (x *EkmConfig) Reset() { + *x = EkmConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EkmConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EkmConfig) ProtoMessage() {} + +func (x *EkmConfig) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EkmConfig.ProtoReflect.Descriptor instead. +func (*EkmConfig) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_ekm_service_proto_rawDescGZIP(), []int{9} +} + +func (x *EkmConfig) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *EkmConfig) GetDefaultEkmConnection() string { + if x != nil { + return x.DefaultEkmConnection + } + return "" +} + +// Request message for +// [EkmService.VerifyConnectivity][google.cloud.kms.v1.EkmService.VerifyConnectivity]. +type VerifyConnectivityRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The [name][google.cloud.kms.v1.EkmConnection.name] of the + // [EkmConnection][google.cloud.kms.v1.EkmConnection] to verify. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *VerifyConnectivityRequest) Reset() { + *x = VerifyConnectivityRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VerifyConnectivityRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VerifyConnectivityRequest) ProtoMessage() {} + +func (x *VerifyConnectivityRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VerifyConnectivityRequest.ProtoReflect.Descriptor instead. +func (*VerifyConnectivityRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_ekm_service_proto_rawDescGZIP(), []int{10} +} + +func (x *VerifyConnectivityRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// Response message for +// [EkmService.VerifyConnectivity][google.cloud.kms.v1.EkmService.VerifyConnectivity]. 
+type VerifyConnectivityResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VerifyConnectivityResponse) Reset() { + *x = VerifyConnectivityResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VerifyConnectivityResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VerifyConnectivityResponse) ProtoMessage() {} + +func (x *VerifyConnectivityResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VerifyConnectivityResponse.ProtoReflect.Descriptor instead. +func (*VerifyConnectivityResponse) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_ekm_service_proto_rawDescGZIP(), []int{11} +} + // A [ServiceResolver][google.cloud.kms.v1.EkmConnection.ServiceResolver] // represents an EKM replica that can be reached within an // [EkmConnection][google.cloud.kms.v1.EkmConnection]. @@ -648,7 +1020,7 @@ type EkmConnection_ServiceResolver struct { func (x *EkmConnection_ServiceResolver) Reset() { *x = EkmConnection_ServiceResolver{} if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[7] + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -661,7 +1033,7 @@ func (x *EkmConnection_ServiceResolver) String() string { func (*EkmConnection_ServiceResolver) ProtoMessage() {} func (x *EkmConnection_ServiceResolver) ProtoReflect() protoreflect.Message { - mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[7] + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -674,7 +1046,7 @@ func (x *EkmConnection_ServiceResolver) ProtoReflect() protoreflect.Message { // Deprecated: Use EkmConnection_ServiceResolver.ProtoReflect.Descriptor instead. 
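// A consumer-side sketch of the remaining messages added in this hunk:
// EkmConnection's new key_management_mode and crypto_space_path fields, the
// EkmConfig singleton, and the VerifyConnectivity request/response pair. The
// package and function names are illustrative; the requests would presumably
// be passed to the corresponding methods of the regenerated EKM client.

package ekmusage

import (
	"cloud.google.com/go/kms/apiv1/kmspb"
	"google.golang.org/protobuf/types/known/fieldmaskpb"
)

// cloudKMSManagedConnection fills the two EkmConnection fields added above;
// per the field comments, crypto_space_path is required when the mode is
// CLOUD_KMS.
func cloudKMSManagedConnection(cryptoSpacePath string) *kmspb.EkmConnection {
	return &kmspb.EkmConnection{
		KeyManagementMode: kmspb.EkmConnection_CLOUD_KMS,
		CryptoSpacePath:   cryptoSpacePath,
	}
}

// setDefaultEkmConnection updates only the default_ekm_connection field of the
// singleton EkmConfig; setting the connection name to "" clears the default.
func setDefaultEkmConnection(ekmConfigName, connectionName string) *kmspb.UpdateEkmConfigRequest {
	return &kmspb.UpdateEkmConfigRequest{
		EkmConfig: &kmspb.EkmConfig{
			Name:                 ekmConfigName,
			DefaultEkmConnection: connectionName,
		},
		UpdateMask: &fieldmaskpb.FieldMask{Paths: []string{"default_ekm_connection"}},
	}
}

// verifyConnectivityRequest asks the service to check that the named
// EkmConnection is reachable; the response message carries no fields.
func verifyConnectivityRequest(connectionName string) *kmspb.VerifyConnectivityRequest {
	return &kmspb.VerifyConnectivityRequest{Name: connectionName}
}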
func (*EkmConnection_ServiceResolver) Descriptor() ([]byte, []int) { - return file_google_cloud_kms_v1_ekm_service_proto_rawDescGZIP(), []int{6, 0} + return file_google_cloud_kms_v1_ekm_service_proto_rawDescGZIP(), []int{8, 0} } func (x *EkmConnection_ServiceResolver) GetServiceDirectoryService() string { @@ -779,152 +1151,239 @@ var file_google_cloud_kms_v1_ekm_service_proto_rawDesc = []byte{ 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, - 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xba, 0x03, 0x0a, 0x0b, - 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x0a, 0x07, 0x72, - 0x61, 0x77, 0x5f, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, - 0x02, 0x52, 0x06, 0x72, 0x61, 0x77, 0x44, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x06, 0x70, 0x61, 0x72, - 0x73, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x06, - 0x70, 0x61, 0x72, 0x73, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x06, 0x69, 0x73, 0x73, - 0x75, 0x65, 0x72, 0x12, 0x1d, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x12, 0x46, 0x0a, 0x1d, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x6c, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x6e, 0x73, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x1a, - 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x69, - 0x76, 0x65, 0x44, 0x6e, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x47, 0x0a, 0x0f, 0x6e, 0x6f, - 0x74, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, - 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0d, 0x6e, 0x6f, 0x74, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x54, - 0x69, 0x6d, 0x65, 0x12, 0x45, 0x0a, 0x0e, 0x6e, 0x6f, 0x74, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x6e, 0x6f, - 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x65, - 0x72, 0x69, 0x61, 0x6c, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4e, 0x75, - 0x6d, 0x62, 0x65, 0x72, 0x12, 0x32, 0x0a, 0x12, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x66, - 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x11, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x46, 0x69, 0x6e, - 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x22, 0x81, 0x05, 0x0a, 0x0d, 0x45, 0x6b, 0x6d, - 0x43, 0x6f, 0x6e, 0x6e, 
0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x5f, 0x0a, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, - 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x6f, - 0x6c, 0x76, 0x65, 0x72, 0x52, 0x10, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, - 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x73, 0x12, 0x17, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x1a, - 0xa5, 0x02, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x6c, - 0x76, 0x65, 0x72, 0x12, 0x6b, 0x0a, 0x19, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x64, - 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2f, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x29, 0x0a, 0x27, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x12, 0x2c, 0x0a, 0x0f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x66, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, - 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1f, - 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x56, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, - 0x76, 0x31, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x03, - 0xe0, 0x41, 0x02, 0x52, 0x12, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x3a, 0x73, 0xea, 0x41, 0x70, 0x0a, 0x25, 0x63, 0x6c, - 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x47, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 
0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x65, 0x6b, 0x6d, - 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x65, 0x6b, 0x6d, - 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x32, 0xb1, 0x07, 0x0a, - 0x0a, 0x45, 0x6b, 0x6d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0xba, 0x01, 0x0a, 0x12, - 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, - 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6b, 0x6d, - 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, - 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6b, 0x6d, - 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x43, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x32, 0x2f, 0x76, 0x31, - 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, - 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xda, - 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0xa7, 0x01, 0x0a, 0x10, 0x47, 0x65, 0x74, - 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, - 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0x54, 0x0a, 0x13, 0x47, + 0x65, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, + 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x22, 0x9e, 0x01, 0x0a, 0x16, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6b, 0x6d, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x0a, + 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, + 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, + 0x73, 0x6b, 0x22, 0xba, 0x03, 0x0a, 0x0b, 0x43, 0x65, 0x72, 0x74, 0x69, 
0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x12, 0x1c, 0x0a, 0x07, 0x72, 0x61, 0x77, 0x5f, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x72, 0x61, 0x77, 0x44, 0x65, 0x72, + 0x12, 0x1b, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x73, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x06, 0x70, 0x61, 0x72, 0x73, 0x65, 0x64, 0x12, 0x1b, 0x0a, + 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x52, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x12, 0x1d, 0x0a, 0x07, 0x73, 0x75, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, + 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x46, 0x0a, 0x1d, 0x73, 0x75, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, + 0x5f, 0x64, 0x6e, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x1a, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x44, 0x6e, 0x73, 0x4e, 0x61, 0x6d, 0x65, + 0x73, 0x12, 0x47, 0x0a, 0x0f, 0x6e, 0x6f, 0x74, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0d, 0x6e, 0x6f, 0x74, + 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x45, 0x0a, 0x0e, 0x6e, 0x6f, + 0x74, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, + 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x6e, 0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x54, 0x69, 0x6d, + 0x65, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x5f, 0x6e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x73, + 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x32, 0x0a, 0x12, 0x73, + 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, + 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x11, 0x73, 0x68, + 0x61, 0x32, 0x35, 0x36, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x22, + 0xf2, 0x06, 0x0a, 0x0d, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, + 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x5f, 0x0a, 0x11, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 
0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b, + 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x52, 0x10, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x73, 0x12, 0x17, 0x0a, + 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, + 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x69, 0x0a, 0x13, 0x6b, 0x65, 0x79, 0x5f, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4b, 0x65, 0x79, 0x4d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x11, + 0x6b, 0x65, 0x79, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4d, 0x6f, 0x64, + 0x65, 0x12, 0x2f, 0x0a, 0x11, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x01, 0x52, 0x0f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x53, 0x70, 0x61, 0x63, 0x65, 0x50, 0x61, + 0x74, 0x68, 0x1a, 0xa5, 0x02, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, + 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x12, 0x6b, 0x0a, 0x19, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2f, 0xe0, 0x41, 0x02, 0xfa, 0x41, + 0x29, 0x0a, 0x27, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x79, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x17, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0x2c, 0x0a, 0x0f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x01, 0x52, 0x0e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x46, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x12, 0x1f, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x56, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, + 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x12, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x22, 0x53, 0x0a, 0x11, 0x4b, 0x65, + 0x79, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x12, + 0x23, 0x0a, 0x1f, 0x4b, 0x45, 0x59, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x4d, 0x45, 0x4e, + 0x54, 0x5f, 0x4d, 0x4f, 
0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, + 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x4d, 0x41, 0x4e, 0x55, 0x41, 0x4c, 0x10, 0x01, + 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x5f, 0x4b, 0x4d, 0x53, 0x10, 0x02, 0x3a, + 0x73, 0xea, 0x41, 0x70, 0x0a, 0x25, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x45, 0x6b, + 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x47, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x7d, 0x22, 0xe4, 0x01, 0x0a, 0x09, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x63, 0x0a, 0x16, 0x64, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x01, + 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x45, 0x6b, 0x6d, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x14, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x3a, 0x59, 0xea, 0x41, 0x56, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x45, + 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x31, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x7d, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x5e, 0x0a, 0x19, 0x56, + 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x0a, 0x25, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x1c, 0x0a, 0x1a, 0x56, + 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, + 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xdc, 0x0b, 0x0a, 0x0a, 0x45, 0x6b, + 0x6d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0xba, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, + 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, + 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 
0x73, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, + 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x43, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x34, 0x12, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0xa7, 0x01, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x45, 0x6b, 0x6d, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, + 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x41, 0xda, 0x41, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x32, 0x2f, 0x76, 0x31, + 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x65, 0x6b, + 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x12, + 0xe0, 0x01, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, + 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x74, 0xda, 0x41, + 0x27, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x2c, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x44, 0x3a, 0x0e, + 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x32, + 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x2a, 0x7d, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0xe2, 0x01, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6b, 0x6d, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 
0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, - 0x41, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, + 0x76, 0xda, 0x41, 0x1a, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x53, 0x3a, 0x0e, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x41, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x65, 0x6b, 0x6d, 0x5f, 0x63, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x94, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x45, + 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, + 0x65, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x22, 0x3a, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x2d, 0x12, 0x2b, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x2a, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x12, 0xc3, + 0x01, 0x0a, 0x0f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, + 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, + 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, + 0x63, 0xda, 0x41, 0x16, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2c, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x44, + 0x3a, 0x0a, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x32, 0x36, 0x2f, 0x76, + 0x31, 0x2f, 0x7b, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0xe0, 0x01, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6b, 0x6d, - 0x43, 
0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, - 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, + 0x66, 0x69, 0x67, 0x7d, 0x12, 0xcb, 0x01, 0x0a, 0x12, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, - 0x31, 0x2e, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, - 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x44, 0x22, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, - 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x65, 0x6b, 0x6d, - 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x0e, 0x65, 0x6b, 0x6d, - 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0xda, 0x41, 0x27, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x2c, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0xe2, 0x01, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x2e, + 0x31, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x76, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, + 0x31, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x76, 0x69, 0x74, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x54, 0xda, 0x41, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x47, 0x12, 0x45, 0x2f, 0x76, 0x31, + 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x65, 0x6b, + 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, + 0x74, 0x79, 0x1a, 0x74, 0xca, 0x41, 0x17, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, + 0x57, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, + 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, + 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x42, 0x85, 0x02, 0xea, 0x41, 0x7c, 0x0a, 0x27, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 
0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x51, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, + 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x0a, 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, - 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, - 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x22, 0x76, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x53, 0x32, 0x41, 0x2f, 0x76, 0x31, 0x2f, - 0x7b, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, - 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x65, 0x6b, 0x6d, 0x43, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x0e, 0x65, - 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0xda, 0x41, 0x1a, - 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x75, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x1a, 0x74, 0xca, 0x41, 0x17, 0x63, - 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x57, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, - 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, - 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, - 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, - 0x42, 0x92, 0x02, 0x0a, 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x0f, 0x45, 0x6b, - 0x6d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x36, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, - 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x6b, 0x6d, 0x73, - 0x2f, 0x76, 0x31, 0x3b, 0x6b, 0x6d, 0x73, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4b, 0x6d, 0x73, 0x2e, 0x56, 0x31, - 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, - 0x4b, 0x6d, 0x73, 0x5c, 0x56, 0x31, 0xea, 0x41, 0x7c, 
0x0a, 0x27, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x12, 0x51, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x7d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x2e, 0x76, 0x31, 0x42, 0x0f, 0x45, 0x6b, 0x6d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6b, 0x6d, 0x73, 0x2f, + 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x6b, 0x6d, 0x73, 0x70, 0x62, 0x3b, 0x6b, 0x6d, 0x73, 0x70, + 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x4b, 0x6d, 0x73, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4b, 0x6d, 0x73, 0x5c, 0x56, 0x31, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -939,42 +1398,58 @@ func file_google_cloud_kms_v1_ekm_service_proto_rawDescGZIP() []byte { return file_google_cloud_kms_v1_ekm_service_proto_rawDescData } -var file_google_cloud_kms_v1_ekm_service_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_google_cloud_kms_v1_ekm_service_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_cloud_kms_v1_ekm_service_proto_msgTypes = make([]protoimpl.MessageInfo, 13) var file_google_cloud_kms_v1_ekm_service_proto_goTypes = []interface{}{ - (*ListEkmConnectionsRequest)(nil), // 0: google.cloud.kms.v1.ListEkmConnectionsRequest - (*ListEkmConnectionsResponse)(nil), // 1: google.cloud.kms.v1.ListEkmConnectionsResponse - (*GetEkmConnectionRequest)(nil), // 2: google.cloud.kms.v1.GetEkmConnectionRequest - (*CreateEkmConnectionRequest)(nil), // 3: google.cloud.kms.v1.CreateEkmConnectionRequest - (*UpdateEkmConnectionRequest)(nil), // 4: google.cloud.kms.v1.UpdateEkmConnectionRequest - (*Certificate)(nil), // 5: google.cloud.kms.v1.Certificate - (*EkmConnection)(nil), // 6: google.cloud.kms.v1.EkmConnection - (*EkmConnection_ServiceResolver)(nil), // 7: google.cloud.kms.v1.EkmConnection.ServiceResolver - (*fieldmaskpb.FieldMask)(nil), // 8: google.protobuf.FieldMask - (*timestamppb.Timestamp)(nil), // 9: google.protobuf.Timestamp + (EkmConnection_KeyManagementMode)(0), // 0: google.cloud.kms.v1.EkmConnection.KeyManagementMode + (*ListEkmConnectionsRequest)(nil), // 1: google.cloud.kms.v1.ListEkmConnectionsRequest + (*ListEkmConnectionsResponse)(nil), // 2: google.cloud.kms.v1.ListEkmConnectionsResponse + (*GetEkmConnectionRequest)(nil), // 3: google.cloud.kms.v1.GetEkmConnectionRequest + (*CreateEkmConnectionRequest)(nil), // 4: google.cloud.kms.v1.CreateEkmConnectionRequest + (*UpdateEkmConnectionRequest)(nil), // 5: google.cloud.kms.v1.UpdateEkmConnectionRequest + (*GetEkmConfigRequest)(nil), // 6: google.cloud.kms.v1.GetEkmConfigRequest + (*UpdateEkmConfigRequest)(nil), // 7: 
google.cloud.kms.v1.UpdateEkmConfigRequest + (*Certificate)(nil), // 8: google.cloud.kms.v1.Certificate + (*EkmConnection)(nil), // 9: google.cloud.kms.v1.EkmConnection + (*EkmConfig)(nil), // 10: google.cloud.kms.v1.EkmConfig + (*VerifyConnectivityRequest)(nil), // 11: google.cloud.kms.v1.VerifyConnectivityRequest + (*VerifyConnectivityResponse)(nil), // 12: google.cloud.kms.v1.VerifyConnectivityResponse + (*EkmConnection_ServiceResolver)(nil), // 13: google.cloud.kms.v1.EkmConnection.ServiceResolver + (*fieldmaskpb.FieldMask)(nil), // 14: google.protobuf.FieldMask + (*timestamppb.Timestamp)(nil), // 15: google.protobuf.Timestamp } var file_google_cloud_kms_v1_ekm_service_proto_depIdxs = []int32{ - 6, // 0: google.cloud.kms.v1.ListEkmConnectionsResponse.ekm_connections:type_name -> google.cloud.kms.v1.EkmConnection - 6, // 1: google.cloud.kms.v1.CreateEkmConnectionRequest.ekm_connection:type_name -> google.cloud.kms.v1.EkmConnection - 6, // 2: google.cloud.kms.v1.UpdateEkmConnectionRequest.ekm_connection:type_name -> google.cloud.kms.v1.EkmConnection - 8, // 3: google.cloud.kms.v1.UpdateEkmConnectionRequest.update_mask:type_name -> google.protobuf.FieldMask - 9, // 4: google.cloud.kms.v1.Certificate.not_before_time:type_name -> google.protobuf.Timestamp - 9, // 5: google.cloud.kms.v1.Certificate.not_after_time:type_name -> google.protobuf.Timestamp - 9, // 6: google.cloud.kms.v1.EkmConnection.create_time:type_name -> google.protobuf.Timestamp - 7, // 7: google.cloud.kms.v1.EkmConnection.service_resolvers:type_name -> google.cloud.kms.v1.EkmConnection.ServiceResolver - 5, // 8: google.cloud.kms.v1.EkmConnection.ServiceResolver.server_certificates:type_name -> google.cloud.kms.v1.Certificate - 0, // 9: google.cloud.kms.v1.EkmService.ListEkmConnections:input_type -> google.cloud.kms.v1.ListEkmConnectionsRequest - 2, // 10: google.cloud.kms.v1.EkmService.GetEkmConnection:input_type -> google.cloud.kms.v1.GetEkmConnectionRequest - 3, // 11: google.cloud.kms.v1.EkmService.CreateEkmConnection:input_type -> google.cloud.kms.v1.CreateEkmConnectionRequest - 4, // 12: google.cloud.kms.v1.EkmService.UpdateEkmConnection:input_type -> google.cloud.kms.v1.UpdateEkmConnectionRequest - 1, // 13: google.cloud.kms.v1.EkmService.ListEkmConnections:output_type -> google.cloud.kms.v1.ListEkmConnectionsResponse - 6, // 14: google.cloud.kms.v1.EkmService.GetEkmConnection:output_type -> google.cloud.kms.v1.EkmConnection - 6, // 15: google.cloud.kms.v1.EkmService.CreateEkmConnection:output_type -> google.cloud.kms.v1.EkmConnection - 6, // 16: google.cloud.kms.v1.EkmService.UpdateEkmConnection:output_type -> google.cloud.kms.v1.EkmConnection - 13, // [13:17] is the sub-list for method output_type - 9, // [9:13] is the sub-list for method input_type - 9, // [9:9] is the sub-list for extension type_name - 9, // [9:9] is the sub-list for extension extendee - 0, // [0:9] is the sub-list for field type_name + 9, // 0: google.cloud.kms.v1.ListEkmConnectionsResponse.ekm_connections:type_name -> google.cloud.kms.v1.EkmConnection + 9, // 1: google.cloud.kms.v1.CreateEkmConnectionRequest.ekm_connection:type_name -> google.cloud.kms.v1.EkmConnection + 9, // 2: google.cloud.kms.v1.UpdateEkmConnectionRequest.ekm_connection:type_name -> google.cloud.kms.v1.EkmConnection + 14, // 3: google.cloud.kms.v1.UpdateEkmConnectionRequest.update_mask:type_name -> google.protobuf.FieldMask + 10, // 4: google.cloud.kms.v1.UpdateEkmConfigRequest.ekm_config:type_name -> google.cloud.kms.v1.EkmConfig + 14, // 5: 
google.cloud.kms.v1.UpdateEkmConfigRequest.update_mask:type_name -> google.protobuf.FieldMask + 15, // 6: google.cloud.kms.v1.Certificate.not_before_time:type_name -> google.protobuf.Timestamp + 15, // 7: google.cloud.kms.v1.Certificate.not_after_time:type_name -> google.protobuf.Timestamp + 15, // 8: google.cloud.kms.v1.EkmConnection.create_time:type_name -> google.protobuf.Timestamp + 13, // 9: google.cloud.kms.v1.EkmConnection.service_resolvers:type_name -> google.cloud.kms.v1.EkmConnection.ServiceResolver + 0, // 10: google.cloud.kms.v1.EkmConnection.key_management_mode:type_name -> google.cloud.kms.v1.EkmConnection.KeyManagementMode + 8, // 11: google.cloud.kms.v1.EkmConnection.ServiceResolver.server_certificates:type_name -> google.cloud.kms.v1.Certificate + 1, // 12: google.cloud.kms.v1.EkmService.ListEkmConnections:input_type -> google.cloud.kms.v1.ListEkmConnectionsRequest + 3, // 13: google.cloud.kms.v1.EkmService.GetEkmConnection:input_type -> google.cloud.kms.v1.GetEkmConnectionRequest + 4, // 14: google.cloud.kms.v1.EkmService.CreateEkmConnection:input_type -> google.cloud.kms.v1.CreateEkmConnectionRequest + 5, // 15: google.cloud.kms.v1.EkmService.UpdateEkmConnection:input_type -> google.cloud.kms.v1.UpdateEkmConnectionRequest + 6, // 16: google.cloud.kms.v1.EkmService.GetEkmConfig:input_type -> google.cloud.kms.v1.GetEkmConfigRequest + 7, // 17: google.cloud.kms.v1.EkmService.UpdateEkmConfig:input_type -> google.cloud.kms.v1.UpdateEkmConfigRequest + 11, // 18: google.cloud.kms.v1.EkmService.VerifyConnectivity:input_type -> google.cloud.kms.v1.VerifyConnectivityRequest + 2, // 19: google.cloud.kms.v1.EkmService.ListEkmConnections:output_type -> google.cloud.kms.v1.ListEkmConnectionsResponse + 9, // 20: google.cloud.kms.v1.EkmService.GetEkmConnection:output_type -> google.cloud.kms.v1.EkmConnection + 9, // 21: google.cloud.kms.v1.EkmService.CreateEkmConnection:output_type -> google.cloud.kms.v1.EkmConnection + 9, // 22: google.cloud.kms.v1.EkmService.UpdateEkmConnection:output_type -> google.cloud.kms.v1.EkmConnection + 10, // 23: google.cloud.kms.v1.EkmService.GetEkmConfig:output_type -> google.cloud.kms.v1.EkmConfig + 10, // 24: google.cloud.kms.v1.EkmService.UpdateEkmConfig:output_type -> google.cloud.kms.v1.EkmConfig + 12, // 25: google.cloud.kms.v1.EkmService.VerifyConnectivity:output_type -> google.cloud.kms.v1.VerifyConnectivityResponse + 19, // [19:26] is the sub-list for method output_type + 12, // [12:19] is the sub-list for method input_type + 12, // [12:12] is the sub-list for extension type_name + 12, // [12:12] is the sub-list for extension extendee + 0, // [0:12] is the sub-list for field type_name } func init() { file_google_cloud_kms_v1_ekm_service_proto_init() } @@ -1044,7 +1519,7 @@ func file_google_cloud_kms_v1_ekm_service_proto_init() { } } file_google_cloud_kms_v1_ekm_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Certificate); i { + switch v := v.(*GetEkmConfigRequest); i { case 0: return &v.state case 1: @@ -1056,7 +1531,7 @@ func file_google_cloud_kms_v1_ekm_service_proto_init() { } } file_google_cloud_kms_v1_ekm_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EkmConnection); i { + switch v := v.(*UpdateEkmConfigRequest); i { case 0: return &v.state case 1: @@ -1068,6 +1543,66 @@ func file_google_cloud_kms_v1_ekm_service_proto_init() { } } file_google_cloud_kms_v1_ekm_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v 
:= v.(*Certificate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_ekm_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EkmConnection); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_ekm_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EkmConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_ekm_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VerifyConnectivityRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_ekm_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VerifyConnectivityResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_ekm_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*EkmConnection_ServiceResolver); i { case 0: return &v.state @@ -1085,13 +1620,14 @@ func file_google_cloud_kms_v1_ekm_service_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_cloud_kms_v1_ekm_service_proto_rawDesc, - NumEnums: 0, - NumMessages: 8, + NumEnums: 1, + NumMessages: 13, NumExtensions: 0, NumServices: 1, }, GoTypes: file_google_cloud_kms_v1_ekm_service_proto_goTypes, DependencyIndexes: file_google_cloud_kms_v1_ekm_service_proto_depIdxs, + EnumInfos: file_google_cloud_kms_v1_ekm_service_proto_enumTypes, MessageInfos: file_google_cloud_kms_v1_ekm_service_proto_msgTypes, }.Build() File_google_cloud_kms_v1_ekm_service_proto = out.File @@ -1122,6 +1658,18 @@ type EkmServiceClient interface { CreateEkmConnection(ctx context.Context, in *CreateEkmConnectionRequest, opts ...grpc.CallOption) (*EkmConnection, error) // Updates an [EkmConnection][google.cloud.kms.v1.EkmConnection]'s metadata. UpdateEkmConnection(ctx context.Context, in *UpdateEkmConnectionRequest, opts ...grpc.CallOption) (*EkmConnection, error) + // Returns the [EkmConfig][google.cloud.kms.v1.EkmConfig] singleton resource + // for a given project and location. + GetEkmConfig(ctx context.Context, in *GetEkmConfigRequest, opts ...grpc.CallOption) (*EkmConfig, error) + // Updates the [EkmConfig][google.cloud.kms.v1.EkmConfig] singleton resource + // for a given project and location. + UpdateEkmConfig(ctx context.Context, in *UpdateEkmConfigRequest, opts ...grpc.CallOption) (*EkmConfig, error) + // Verifies that Cloud KMS can successfully connect to the external key + // manager specified by an [EkmConnection][google.cloud.kms.v1.EkmConnection]. + // If there is an error connecting to the EKM, this method returns a + // FAILED_PRECONDITION status containing structured information as described + // at https://cloud.google.com/kms/docs/reference/ekm_errors. 
+ VerifyConnectivity(ctx context.Context, in *VerifyConnectivityRequest, opts ...grpc.CallOption) (*VerifyConnectivityResponse, error) } type ekmServiceClient struct { @@ -1168,6 +1716,33 @@ func (c *ekmServiceClient) UpdateEkmConnection(ctx context.Context, in *UpdateEk return out, nil } +func (c *ekmServiceClient) GetEkmConfig(ctx context.Context, in *GetEkmConfigRequest, opts ...grpc.CallOption) (*EkmConfig, error) { + out := new(EkmConfig) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.EkmService/GetEkmConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *ekmServiceClient) UpdateEkmConfig(ctx context.Context, in *UpdateEkmConfigRequest, opts ...grpc.CallOption) (*EkmConfig, error) { + out := new(EkmConfig) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.EkmService/UpdateEkmConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *ekmServiceClient) VerifyConnectivity(ctx context.Context, in *VerifyConnectivityRequest, opts ...grpc.CallOption) (*VerifyConnectivityResponse, error) { + out := new(VerifyConnectivityResponse) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.EkmService/VerifyConnectivity", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // EkmServiceServer is the server API for EkmService service. type EkmServiceServer interface { // Lists [EkmConnections][google.cloud.kms.v1.EkmConnection]. @@ -1180,6 +1755,18 @@ type EkmServiceServer interface { CreateEkmConnection(context.Context, *CreateEkmConnectionRequest) (*EkmConnection, error) // Updates an [EkmConnection][google.cloud.kms.v1.EkmConnection]'s metadata. UpdateEkmConnection(context.Context, *UpdateEkmConnectionRequest) (*EkmConnection, error) + // Returns the [EkmConfig][google.cloud.kms.v1.EkmConfig] singleton resource + // for a given project and location. + GetEkmConfig(context.Context, *GetEkmConfigRequest) (*EkmConfig, error) + // Updates the [EkmConfig][google.cloud.kms.v1.EkmConfig] singleton resource + // for a given project and location. + UpdateEkmConfig(context.Context, *UpdateEkmConfigRequest) (*EkmConfig, error) + // Verifies that Cloud KMS can successfully connect to the external key + // manager specified by an [EkmConnection][google.cloud.kms.v1.EkmConnection]. + // If there is an error connecting to the EKM, this method returns a + // FAILED_PRECONDITION status containing structured information as described + // at https://cloud.google.com/kms/docs/reference/ekm_errors. + VerifyConnectivity(context.Context, *VerifyConnectivityRequest) (*VerifyConnectivityResponse, error) } // UnimplementedEkmServiceServer can be embedded to have forward compatible implementations. 
@@ -1198,6 +1785,15 @@ func (*UnimplementedEkmServiceServer) CreateEkmConnection(context.Context, *Crea func (*UnimplementedEkmServiceServer) UpdateEkmConnection(context.Context, *UpdateEkmConnectionRequest) (*EkmConnection, error) { return nil, status.Errorf(codes.Unimplemented, "method UpdateEkmConnection not implemented") } +func (*UnimplementedEkmServiceServer) GetEkmConfig(context.Context, *GetEkmConfigRequest) (*EkmConfig, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetEkmConfig not implemented") +} +func (*UnimplementedEkmServiceServer) UpdateEkmConfig(context.Context, *UpdateEkmConfigRequest) (*EkmConfig, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateEkmConfig not implemented") +} +func (*UnimplementedEkmServiceServer) VerifyConnectivity(context.Context, *VerifyConnectivityRequest) (*VerifyConnectivityResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VerifyConnectivity not implemented") +} func RegisterEkmServiceServer(s *grpc.Server, srv EkmServiceServer) { s.RegisterService(&_EkmService_serviceDesc, srv) @@ -1275,6 +1871,60 @@ func _EkmService_UpdateEkmConnection_Handler(srv interface{}, ctx context.Contex return interceptor(ctx, in, info, handler) } +func _EkmService_GetEkmConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetEkmConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EkmServiceServer).GetEkmConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.EkmService/GetEkmConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EkmServiceServer).GetEkmConfig(ctx, req.(*GetEkmConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _EkmService_UpdateEkmConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateEkmConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EkmServiceServer).UpdateEkmConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.EkmService/UpdateEkmConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EkmServiceServer).UpdateEkmConfig(ctx, req.(*UpdateEkmConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _EkmService_VerifyConnectivity_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VerifyConnectivityRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EkmServiceServer).VerifyConnectivity(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.EkmService/VerifyConnectivity", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EkmServiceServer).VerifyConnectivity(ctx, req.(*VerifyConnectivityRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _EkmService_serviceDesc = grpc.ServiceDesc{ ServiceName: "google.cloud.kms.v1.EkmService", HandlerType: (*EkmServiceServer)(nil), @@ -1295,6 +1945,18 @@ var _EkmService_serviceDesc = grpc.ServiceDesc{ MethodName: "UpdateEkmConnection", 
Handler: _EkmService_UpdateEkmConnection_Handler, }, + { + MethodName: "GetEkmConfig", + Handler: _EkmService_GetEkmConfig_Handler, + }, + { + MethodName: "UpdateEkmConfig", + Handler: _EkmService_UpdateEkmConfig_Handler, + }, + { + MethodName: "VerifyConnectivity", + Handler: _EkmService_VerifyConnectivity_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "google/cloud/kms/v1/ekm_service.proto", diff --git a/vendor/cloud.google.com/go/kms/apiv1/kmspb/resources.pb.go b/vendor/cloud.google.com/go/kms/apiv1/kmspb/resources.pb.go index 83b3568c..d806e35d 100644 --- a/vendor/cloud.google.com/go/kms/apiv1/kmspb/resources.pb.go +++ b/vendor/cloud.google.com/go/kms/apiv1/kmspb/resources.pb.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc-gen-go v1.30.0 +// protoc v4.23.2 // source: google/cloud/kms/v1/resources.proto package kmspb @@ -325,11 +325,17 @@ const ( // RSAES-OAEP 4096 bit key with a SHA1 digest. CryptoKeyVersion_RSA_DECRYPT_OAEP_4096_SHA1 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 39 // ECDSA on the NIST P-256 curve with a SHA256 digest. + // Other hash functions can also be used: + // https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms CryptoKeyVersion_EC_SIGN_P256_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 12 // ECDSA on the NIST P-384 curve with a SHA384 digest. + // Other hash functions can also be used: + // https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms CryptoKeyVersion_EC_SIGN_P384_SHA384 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 13 // ECDSA on the non-NIST secp256k1 curve. This curve is only supported for // HSM protection level. + // Other hash functions can also be used: + // https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms CryptoKeyVersion_EC_SIGN_SECP256K1_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 31 // HMAC-SHA256 signing with a 256 bit key. CryptoKeyVersion_HMAC_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 32 @@ -484,19 +490,36 @@ const ( // Additional details can be found in // [CryptoKeyVersion.import_failure_reason][google.cloud.kms.v1.CryptoKeyVersion.import_failure_reason]. CryptoKeyVersion_IMPORT_FAILED CryptoKeyVersion_CryptoKeyVersionState = 7 + // This version was not generated successfully. It may not be used, enabled, + // disabled, or destroyed. Additional details can be found in + // [CryptoKeyVersion.generation_failure_reason][google.cloud.kms.v1.CryptoKeyVersion.generation_failure_reason]. + CryptoKeyVersion_GENERATION_FAILED CryptoKeyVersion_CryptoKeyVersionState = 8 + // This version was destroyed, and it may not be used or enabled again. + // Cloud KMS is waiting for the corresponding key material residing in an + // external key manager to be destroyed. + CryptoKeyVersion_PENDING_EXTERNAL_DESTRUCTION CryptoKeyVersion_CryptoKeyVersionState = 9 + // This version was destroyed, and it may not be used or enabled again. + // However, Cloud KMS could not confirm that the corresponding key material + // residing in an external key manager was destroyed. 
Additional details can + // be found in + // [CryptoKeyVersion.external_destruction_failure_reason][google.cloud.kms.v1.CryptoKeyVersion.external_destruction_failure_reason]. + CryptoKeyVersion_EXTERNAL_DESTRUCTION_FAILED CryptoKeyVersion_CryptoKeyVersionState = 10 ) // Enum value maps for CryptoKeyVersion_CryptoKeyVersionState. var ( CryptoKeyVersion_CryptoKeyVersionState_name = map[int32]string{ - 0: "CRYPTO_KEY_VERSION_STATE_UNSPECIFIED", - 5: "PENDING_GENERATION", - 1: "ENABLED", - 2: "DISABLED", - 3: "DESTROYED", - 4: "DESTROY_SCHEDULED", - 6: "PENDING_IMPORT", - 7: "IMPORT_FAILED", + 0: "CRYPTO_KEY_VERSION_STATE_UNSPECIFIED", + 5: "PENDING_GENERATION", + 1: "ENABLED", + 2: "DISABLED", + 3: "DESTROYED", + 4: "DESTROY_SCHEDULED", + 6: "PENDING_IMPORT", + 7: "IMPORT_FAILED", + 8: "GENERATION_FAILED", + 9: "PENDING_EXTERNAL_DESTRUCTION", + 10: "EXTERNAL_DESTRUCTION_FAILED", } CryptoKeyVersion_CryptoKeyVersionState_value = map[string]int32{ "CRYPTO_KEY_VERSION_STATE_UNSPECIFIED": 0, @@ -507,6 +530,9 @@ var ( "DESTROY_SCHEDULED": 4, "PENDING_IMPORT": 6, "IMPORT_FAILED": 7, + "GENERATION_FAILED": 8, + "PENDING_EXTERNAL_DESTRUCTION": 9, + "EXTERNAL_DESTRUCTION_FAILED": 10, } ) @@ -875,7 +901,6 @@ type CryptoKey struct { // Controls the rate of automatic rotation. // // Types that are assignable to RotationSchedule: - // // *CryptoKey_RotationPeriod RotationSchedule isCryptoKey_RotationSchedule `protobuf_oneof:"rotation_schedule"` // A template describing settings for new @@ -1261,6 +1286,15 @@ type CryptoKeyVersion struct { // if [state][google.cloud.kms.v1.CryptoKeyVersion.state] is // [IMPORT_FAILED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.IMPORT_FAILED]. ImportFailureReason string `protobuf:"bytes,16,opt,name=import_failure_reason,json=importFailureReason,proto3" json:"import_failure_reason,omitempty"` + // Output only. The root cause of the most recent generation failure. Only + // present if [state][google.cloud.kms.v1.CryptoKeyVersion.state] is + // [GENERATION_FAILED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.GENERATION_FAILED]. + GenerationFailureReason string `protobuf:"bytes,19,opt,name=generation_failure_reason,json=generationFailureReason,proto3" json:"generation_failure_reason,omitempty"` + // Output only. The root cause of the most recent external destruction + // failure. Only present if + // [state][google.cloud.kms.v1.CryptoKeyVersion.state] is + // [EXTERNAL_DESTRUCTION_FAILED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.EXTERNAL_DESTRUCTION_FAILED]. 
+ ExternalDestructionFailureReason string `protobuf:"bytes,20,opt,name=external_destruction_failure_reason,json=externalDestructionFailureReason,proto3" json:"external_destruction_failure_reason,omitempty"` // ExternalProtectionLevelOptions stores a group of additional fields for // configuring a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] that // are specific to the @@ -1390,6 +1424,20 @@ func (x *CryptoKeyVersion) GetImportFailureReason() string { return "" } +func (x *CryptoKeyVersion) GetGenerationFailureReason() string { + if x != nil { + return x.GenerationFailureReason + } + return "" +} + +func (x *CryptoKeyVersion) GetExternalDestructionFailureReason() string { + if x != nil { + return x.ExternalDestructionFailureReason + } + return "" +} + func (x *CryptoKeyVersion) GetExternalProtectionLevelOptions() *ExternalProtectionLevelOptions { if x != nil { return x.ExternalProtectionLevelOptions @@ -2032,7 +2080,7 @@ var file_google_cloud_kms_v1_resources_proto_rawDesc = []byte{ 0x12, 0x18, 0x0a, 0x14, 0x43, 0x41, 0x56, 0x49, 0x55, 0x4d, 0x5f, 0x56, 0x31, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x52, 0x45, 0x53, 0x53, 0x45, 0x44, 0x10, 0x03, 0x12, 0x18, 0x0a, 0x14, 0x43, 0x41, 0x56, 0x49, 0x55, 0x4d, 0x5f, 0x56, 0x32, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x52, 0x45, 0x53, 0x53, - 0x45, 0x44, 0x10, 0x04, 0x22, 0x9f, 0x12, 0x0a, 0x10, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, + 0x45, 0x44, 0x10, 0x04, 0x22, 0x8e, 0x14, 0x0a, 0x10, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x51, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, @@ -2084,232 +2132,246 @@ var file_google_cloud_kms_v1_resources_proto_rawDesc = []byte{ 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x15, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x13, 0x69, 0x6d, 0x70, 0x6f, 0x72, - 0x74, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x7e, - 0x0a, 0x21, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, - 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x1e, - 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, - 0x0a, 0x11, 0x72, 0x65, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x65, 0x6c, 0x69, 0x67, 0x69, - 0x62, 0x6c, 0x65, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x10, - 0x72, 0x65, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x45, 0x6c, 0x69, 0x67, 0x69, 0x62, 0x6c, 0x65, - 0x22, 0xe7, 0x06, 0x0a, 0x19, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x2c, - 0x0a, 0x28, 0x43, 
0x52, 0x59, 0x50, 0x54, 0x4f, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x45, 0x52, - 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x41, 0x4c, 0x47, 0x4f, 0x52, 0x49, 0x54, 0x48, 0x4d, 0x5f, 0x55, - 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1f, 0x0a, 0x1b, - 0x47, 0x4f, 0x4f, 0x47, 0x4c, 0x45, 0x5f, 0x53, 0x59, 0x4d, 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, - 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x1c, 0x0a, - 0x18, 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x32, 0x30, - 0x34, 0x38, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x02, 0x12, 0x1c, 0x0a, 0x18, 0x52, - 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x33, 0x30, 0x37, 0x32, - 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x03, 0x12, 0x1c, 0x0a, 0x18, 0x52, 0x53, 0x41, - 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, - 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x1c, 0x0a, 0x18, 0x52, 0x53, 0x41, 0x5f, 0x53, - 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, - 0x35, 0x31, 0x32, 0x10, 0x0f, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, - 0x4e, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x32, 0x30, 0x34, 0x38, 0x5f, 0x53, 0x48, 0x41, - 0x32, 0x35, 0x36, 0x10, 0x05, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, - 0x4e, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, - 0x32, 0x35, 0x36, 0x10, 0x06, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, - 0x4e, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, - 0x32, 0x35, 0x36, 0x10, 0x07, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, - 0x4e, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, - 0x35, 0x31, 0x32, 0x10, 0x10, 0x12, 0x1b, 0x0a, 0x17, 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, - 0x4e, 0x5f, 0x52, 0x41, 0x57, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x32, 0x30, 0x34, 0x38, - 0x10, 0x1c, 0x12, 0x1b, 0x0a, 0x17, 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, - 0x41, 0x57, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x33, 0x30, 0x37, 0x32, 0x10, 0x1d, 0x12, - 0x1b, 0x0a, 0x17, 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x41, 0x57, 0x5f, - 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x10, 0x1e, 0x12, 0x20, 0x0a, 0x1c, - 0x52, 0x53, 0x41, 0x5f, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, 0x5f, 0x4f, 0x41, 0x45, 0x50, - 0x5f, 0x32, 0x30, 0x34, 0x38, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x08, 0x12, 0x20, - 0x0a, 0x1c, 0x52, 0x53, 0x41, 0x5f, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, 0x5f, 0x4f, 0x41, - 0x45, 0x50, 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x09, - 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x53, 0x41, 0x5f, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, 0x5f, - 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, - 0x10, 0x0a, 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x53, 0x41, 0x5f, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, - 0x54, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x35, - 0x31, 0x32, 0x10, 0x11, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x53, 0x41, 0x5f, 0x44, 0x45, 0x43, 0x52, - 0x59, 0x50, 0x54, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x32, 0x30, 0x34, 0x38, 0x5f, 0x53, 0x48, - 0x41, 0x31, 0x10, 0x25, 0x12, 0x1e, 0x0a, 
0x1a, 0x52, 0x53, 0x41, 0x5f, 0x44, 0x45, 0x43, 0x52, - 0x59, 0x50, 0x54, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, - 0x41, 0x31, 0x10, 0x26, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x53, 0x41, 0x5f, 0x44, 0x45, 0x43, 0x52, - 0x59, 0x50, 0x54, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, - 0x41, 0x31, 0x10, 0x27, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x43, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, - 0x50, 0x32, 0x35, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x0c, 0x12, 0x17, 0x0a, - 0x13, 0x45, 0x43, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x33, 0x38, 0x34, 0x5f, 0x53, 0x48, - 0x41, 0x33, 0x38, 0x34, 0x10, 0x0d, 0x12, 0x1c, 0x0a, 0x18, 0x45, 0x43, 0x5f, 0x53, 0x49, 0x47, - 0x4e, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x32, 0x35, 0x36, 0x4b, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, - 0x35, 0x36, 0x10, 0x1f, 0x12, 0x0f, 0x0a, 0x0b, 0x48, 0x4d, 0x41, 0x43, 0x5f, 0x53, 0x48, 0x41, - 0x32, 0x35, 0x36, 0x10, 0x20, 0x12, 0x0d, 0x0a, 0x09, 0x48, 0x4d, 0x41, 0x43, 0x5f, 0x53, 0x48, - 0x41, 0x31, 0x10, 0x21, 0x12, 0x0f, 0x0a, 0x0b, 0x48, 0x4d, 0x41, 0x43, 0x5f, 0x53, 0x48, 0x41, - 0x33, 0x38, 0x34, 0x10, 0x22, 0x12, 0x0f, 0x0a, 0x0b, 0x48, 0x4d, 0x41, 0x43, 0x5f, 0x53, 0x48, - 0x41, 0x35, 0x31, 0x32, 0x10, 0x23, 0x12, 0x0f, 0x0a, 0x0b, 0x48, 0x4d, 0x41, 0x43, 0x5f, 0x53, - 0x48, 0x41, 0x32, 0x32, 0x34, 0x10, 0x24, 0x12, 0x21, 0x0a, 0x1d, 0x45, 0x58, 0x54, 0x45, 0x52, - 0x4e, 0x41, 0x4c, 0x5f, 0x53, 0x59, 0x4d, 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, 0x5f, 0x45, 0x4e, - 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x12, 0x22, 0xc1, 0x01, 0x0a, 0x15, 0x43, - 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x12, 0x28, 0x0a, 0x24, 0x43, 0x52, 0x59, 0x50, 0x54, 0x4f, 0x5f, 0x4b, - 0x45, 0x59, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, - 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x16, - 0x0a, 0x12, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, - 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x45, 0x4e, 0x41, 0x42, 0x4c, 0x45, - 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x49, 0x53, 0x41, 0x42, 0x4c, 0x45, 0x44, 0x10, - 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x53, 0x54, 0x52, 0x4f, 0x59, 0x45, 0x44, 0x10, 0x03, - 0x12, 0x15, 0x0a, 0x11, 0x44, 0x45, 0x53, 0x54, 0x52, 0x4f, 0x59, 0x5f, 0x53, 0x43, 0x48, 0x45, - 0x44, 0x55, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x12, 0x12, 0x0a, 0x0e, 0x50, 0x45, 0x4e, 0x44, 0x49, - 0x4e, 0x47, 0x5f, 0x49, 0x4d, 0x50, 0x4f, 0x52, 0x54, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x49, - 0x4d, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x07, 0x22, 0x49, - 0x0a, 0x14, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x56, 0x69, 0x65, 0x77, 0x12, 0x27, 0x0a, 0x23, 0x43, 0x52, 0x59, 0x50, 0x54, 0x4f, - 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x56, 0x49, 0x45, - 0x57, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, - 0x08, 0x0a, 0x04, 0x46, 0x55, 0x4c, 0x4c, 0x10, 0x01, 0x3a, 0xaa, 0x01, 0xea, 0x41, 0xa6, 0x01, - 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, - 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 
0x7a, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, - 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, - 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, - 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x2f, - 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x7d, 0x22, 0xce, 0x03, 0x0a, 0x09, 0x50, 0x75, 0x62, 0x6c, 0x69, - 0x63, 0x4b, 0x65, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x65, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x70, 0x65, 0x6d, 0x12, 0x5d, 0x0a, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, - 0x74, 0x68, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, - 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x52, 0x09, 0x61, 0x6c, 0x67, 0x6f, - 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x3a, 0x0a, 0x0a, 0x70, 0x65, 0x6d, 0x5f, 0x63, 0x72, 0x63, - 0x33, 0x32, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, - 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x70, 0x65, 0x6d, 0x43, 0x72, 0x63, 0x33, 0x32, - 0x63, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4f, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, - 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0xae, 0x01, 0xea, 0x41, 0xaa, 0x01, 0x0a, 0x21, 0x63, - 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, - 0x12, 0x84, 0x01, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, - 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, - 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, - 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, - 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, - 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x70, 
0x75, - 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x22, 0xd4, 0x09, 0x0a, 0x09, 0x49, 0x6d, 0x70, 0x6f, - 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x58, - 0x0a, 0x0d, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, - 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x70, 0x6f, - 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4d, 0x65, 0x74, 0x68, - 0x6f, 0x64, 0x42, 0x06, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0x52, 0x0c, 0x69, 0x6d, 0x70, 0x6f, - 0x72, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x57, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, - 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x42, 0x06, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, - 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, - 0x6c, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, - 0x69, 0x6d, 0x65, 0x12, 0x44, 0x0a, 0x0d, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x65, 0x78, 0x70, - 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, - 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x4b, 0x0a, 0x11, 0x65, - 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x48, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, - 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, - 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, - 0x62, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, - 0x74, 0x65, 0x12, 
0x54, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x70, - 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x2e, 0x57, 0x72, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x50, - 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x70, - 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x53, 0x0a, 0x0b, 0x61, 0x74, 0x74, 0x65, - 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, - 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x03, - 0x52, 0x0b, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x25, 0x0a, - 0x11, 0x57, 0x72, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, - 0x65, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x65, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x70, 0x65, 0x6d, 0x22, 0xe5, 0x01, 0x0a, 0x0c, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4d, - 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x1d, 0x0a, 0x19, 0x49, 0x4d, 0x50, 0x4f, 0x52, 0x54, 0x5f, - 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, - 0x45, 0x44, 0x10, 0x00, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x53, 0x41, 0x5f, 0x4f, 0x41, 0x45, 0x50, - 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x31, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, - 0x35, 0x36, 0x10, 0x01, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x53, 0x41, 0x5f, 0x4f, 0x41, 0x45, 0x50, - 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x31, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, - 0x35, 0x36, 0x10, 0x02, 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x53, 0x41, 0x5f, 0x4f, 0x41, 0x45, 0x50, - 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x5f, 0x41, 0x45, 0x53, - 0x5f, 0x32, 0x35, 0x36, 0x10, 0x03, 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x53, 0x41, 0x5f, 0x4f, 0x41, - 0x45, 0x50, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x5f, 0x41, - 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x53, 0x41, 0x5f, - 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, - 0x10, 0x05, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x53, 0x41, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x34, - 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x06, 0x22, 0x63, 0x0a, 0x0e, - 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x20, - 0x0a, 0x1c, 0x49, 0x4d, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x4a, 0x4f, 0x42, 0x5f, 0x53, 0x54, 0x41, - 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, - 0x12, 0x16, 0x0a, 0x12, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x47, 0x45, 0x4e, 0x45, - 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, - 0x56, 0x45, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x45, 0x58, 0x50, 0x49, 0x52, 0x45, 0x44, 0x10, - 0x03, 0x3a, 0x7b, 0xea, 0x41, 0x78, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 
0x6f, 0x62, 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x74, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x3f, + 0x0a, 0x19, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x61, 0x69, + 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x13, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x17, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, + 0x52, 0x0a, 0x23, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x65, 0x73, 0x74, + 0x72, 0x75, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, + 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x14, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x03, 0x52, 0x20, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x65, 0x73, 0x74, 0x72, + 0x75, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x61, + 0x73, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x21, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, + 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, + 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, + 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x1e, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, + 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x11, 0x72, 0x65, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, + 0x65, 0x6c, 0x69, 0x67, 0x69, 0x62, 0x6c, 0x65, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, + 0xe0, 0x41, 0x03, 0x52, 0x10, 0x72, 0x65, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x45, 0x6c, 0x69, + 0x67, 0x69, 0x62, 0x6c, 0x65, 0x22, 0xe7, 0x06, 0x0a, 0x19, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, + 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, + 0x74, 0x68, 0x6d, 0x12, 0x2c, 0x0a, 0x28, 0x43, 0x52, 0x59, 0x50, 0x54, 0x4f, 0x5f, 0x4b, 0x45, + 0x59, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x41, 0x4c, 0x47, 0x4f, 0x52, 0x49, + 0x54, 0x48, 0x4d, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x1f, 0x0a, 0x1b, 0x47, 0x4f, 0x4f, 0x47, 0x4c, 0x45, 0x5f, 0x53, 0x59, 0x4d, 0x4d, + 0x45, 0x54, 0x52, 0x49, 0x43, 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, 0x4f, 0x4e, + 0x10, 0x01, 0x12, 0x1c, 0x0a, 0x18, 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, + 0x53, 0x53, 0x5f, 0x32, 0x30, 0x34, 0x38, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x02, + 0x12, 0x1c, 0x0a, 0x18, 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x53, 0x53, + 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x03, 0x12, 0x1c, + 0x0a, 0x18, 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x34, + 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x1c, 0x0a, 0x18, + 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x34, 0x30, 0x39, + 0x36, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x0f, 0x12, 
0x1e, 0x0a, 0x1a, 0x52, 0x53, + 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x32, 0x30, 0x34, + 0x38, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x05, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x53, + 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x33, 0x30, 0x37, + 0x32, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x06, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x53, + 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x34, 0x30, 0x39, + 0x36, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x07, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x53, + 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x34, 0x30, 0x39, + 0x36, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x10, 0x12, 0x1b, 0x0a, 0x17, 0x52, 0x53, + 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x41, 0x57, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, + 0x5f, 0x32, 0x30, 0x34, 0x38, 0x10, 0x1c, 0x12, 0x1b, 0x0a, 0x17, 0x52, 0x53, 0x41, 0x5f, 0x53, + 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x41, 0x57, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x33, 0x30, + 0x37, 0x32, 0x10, 0x1d, 0x12, 0x1b, 0x0a, 0x17, 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e, + 0x5f, 0x52, 0x41, 0x57, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x10, + 0x1e, 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x53, 0x41, 0x5f, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, + 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x32, 0x30, 0x34, 0x38, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, + 0x36, 0x10, 0x08, 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x53, 0x41, 0x5f, 0x44, 0x45, 0x43, 0x52, 0x59, + 0x50, 0x54, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, + 0x32, 0x35, 0x36, 0x10, 0x09, 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x53, 0x41, 0x5f, 0x44, 0x45, 0x43, + 0x52, 0x59, 0x50, 0x54, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, + 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x0a, 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x53, 0x41, 0x5f, 0x44, + 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x34, 0x30, 0x39, 0x36, + 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x11, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x53, 0x41, + 0x5f, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x32, 0x30, + 0x34, 0x38, 0x5f, 0x53, 0x48, 0x41, 0x31, 0x10, 0x25, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x53, 0x41, + 0x5f, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x33, 0x30, + 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x31, 0x10, 0x26, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x53, 0x41, + 0x5f, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x34, 0x30, + 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x31, 0x10, 0x27, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x43, 0x5f, + 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x32, 0x35, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, + 0x10, 0x0c, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x43, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x33, + 0x38, 0x34, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x0d, 0x12, 0x1c, 0x0a, 0x18, 0x45, + 0x43, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x32, 0x35, 0x36, 0x4b, 0x31, + 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x1f, 0x12, 0x0f, 0x0a, 0x0b, 0x48, 0x4d, 0x41, + 0x43, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x20, 0x12, 0x0d, 0x0a, 0x09, 0x48, 0x4d, + 0x41, 0x43, 0x5f, 0x53, 0x48, 0x41, 0x31, 0x10, 0x21, 0x12, 0x0f, 0x0a, 0x0b, 0x48, 0x4d, 0x41, + 0x43, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x22, 0x12, 0x0f, 0x0a, 0x0b, 0x48, 
0x4d, + 0x41, 0x43, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x23, 0x12, 0x0f, 0x0a, 0x0b, 0x48, + 0x4d, 0x41, 0x43, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x32, 0x34, 0x10, 0x24, 0x12, 0x21, 0x0a, 0x1d, + 0x45, 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x53, 0x59, 0x4d, 0x4d, 0x45, 0x54, 0x52, + 0x49, 0x43, 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x12, 0x22, + 0x9b, 0x02, 0x0a, 0x15, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x28, 0x0a, 0x24, 0x43, 0x52, 0x59, + 0x50, 0x54, 0x4f, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, + 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x47, + 0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x45, + 0x4e, 0x41, 0x42, 0x4c, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x49, 0x53, 0x41, + 0x42, 0x4c, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x53, 0x54, 0x52, 0x4f, + 0x59, 0x45, 0x44, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x45, 0x53, 0x54, 0x52, 0x4f, 0x59, + 0x5f, 0x53, 0x43, 0x48, 0x45, 0x44, 0x55, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x12, 0x12, 0x0a, 0x0e, + 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x49, 0x4d, 0x50, 0x4f, 0x52, 0x54, 0x10, 0x06, + 0x12, 0x11, 0x0a, 0x0d, 0x49, 0x4d, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, + 0x44, 0x10, 0x07, 0x12, 0x15, 0x0a, 0x11, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x49, 0x4f, + 0x4e, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x08, 0x12, 0x20, 0x0a, 0x1c, 0x50, 0x45, + 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x44, + 0x45, 0x53, 0x54, 0x52, 0x55, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x09, 0x12, 0x1f, 0x0a, 0x1b, + 0x45, 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x52, 0x55, 0x43, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x0a, 0x22, 0x49, 0x0a, + 0x14, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x56, 0x69, 0x65, 0x77, 0x12, 0x27, 0x0a, 0x23, 0x43, 0x52, 0x59, 0x50, 0x54, 0x4f, 0x5f, + 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x56, 0x49, 0x45, 0x57, + 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, + 0x0a, 0x04, 0x46, 0x55, 0x4c, 0x4c, 0x10, 0x01, 0x3a, 0xaa, 0x01, 0xea, 0x41, 0xa6, 0x01, 0x0a, + 0x28, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, + 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x7a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, - 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, - 0x73, 0x2f, 0x7b, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6a, 0x6f, 0x62, 0x7d, 0x22, 0x81, - 0x01, 0x0a, 0x1e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x65, - 0x63, 0x74, 0x69, 0x6f, 
0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x6b, 0x65, - 0x79, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x78, 0x74, - 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4b, 0x65, 0x79, 0x55, 0x72, 0x69, 0x12, 0x35, 0x0a, 0x17, 0x65, - 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, - 0x79, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x65, 0x6b, - 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x50, 0x61, - 0x74, 0x68, 0x2a, 0x6a, 0x0a, 0x0f, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x20, 0x0a, 0x1c, 0x50, 0x52, 0x4f, 0x54, 0x45, 0x43, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, - 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x4f, 0x46, 0x54, 0x57, - 0x41, 0x52, 0x45, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x48, 0x53, 0x4d, 0x10, 0x02, 0x12, 0x0c, - 0x0a, 0x08, 0x45, 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x03, 0x12, 0x10, 0x0a, 0x0c, - 0x45, 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x56, 0x50, 0x43, 0x10, 0x04, 0x42, 0x95, - 0x01, 0x0a, 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, - 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x11, 0x4b, 0x6d, 0x73, 0x52, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x36, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, - 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x6b, 0x6d, 0x73, - 0x2f, 0x76, 0x31, 0x3b, 0x6b, 0x6d, 0x73, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4b, 0x6d, 0x73, 0x2e, 0x56, 0x31, - 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, - 0x4b, 0x6d, 0x73, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, + 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x2f, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x7d, 0x22, 0xce, 0x03, 0x0a, 0x09, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, + 0x4b, 0x65, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x65, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x70, 0x65, 0x6d, 0x12, 0x5d, 0x0a, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, + 0x68, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, + 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, + 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x52, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, + 0x69, 0x74, 0x68, 0x6d, 0x12, 0x3a, 0x0a, 0x0a, 0x70, 0x65, 
0x6d, 0x5f, 0x63, 0x72, 0x63, 0x33, + 0x32, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x70, 0x65, 0x6d, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4f, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, + 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0xae, 0x01, 0xea, 0x41, 0xaa, 0x01, 0x0a, 0x21, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, + 0x84, 0x01, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, + 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, + 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, + 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x70, 0x75, 0x62, + 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x22, 0xd4, 0x09, 0x0a, 0x09, 0x49, 0x6d, 0x70, 0x6f, 0x72, + 0x74, 0x4a, 0x6f, 0x62, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x58, 0x0a, + 0x0d, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, + 0x74, 0x4a, 0x6f, 0x62, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x42, 0x06, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0x52, 0x0c, 0x69, 0x6d, 0x70, 0x6f, 0x72, + 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x57, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x42, 0x06, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0x52, + 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, + 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 
0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, + 0x6d, 0x65, 0x12, 0x44, 0x0a, 0x0d, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x65, 0x78, 0x70, 0x69, + 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, + 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x4b, 0x0a, 0x11, 0x65, 0x78, + 0x70, 0x69, 0x72, 0x65, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x45, 0x76, + 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x48, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x70, + 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x12, 0x54, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x70, 0x6f, + 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x2e, 0x57, 0x72, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x50, 0x75, + 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x70, 0x75, + 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x53, 0x0a, 0x0b, 0x61, 0x74, 0x74, 0x65, 0x73, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, + 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, + 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, + 0x0b, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x25, 0x0a, 0x11, + 0x57, 0x72, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x65, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x70, 0x65, 0x6d, 0x22, 0xe5, 0x01, 0x0a, 0x0c, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x12, 0x1d, 0x0a, 0x19, 0x49, 0x4d, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x4d, + 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 
0x00, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x53, 0x41, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, + 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x31, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, + 0x36, 0x10, 0x01, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x53, 0x41, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, + 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x31, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, + 0x36, 0x10, 0x02, 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x53, 0x41, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, + 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x5f, 0x41, 0x45, 0x53, 0x5f, + 0x32, 0x35, 0x36, 0x10, 0x03, 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x53, 0x41, 0x5f, 0x4f, 0x41, 0x45, + 0x50, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x5f, 0x41, 0x45, + 0x53, 0x5f, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x53, 0x41, 0x5f, 0x4f, + 0x41, 0x45, 0x50, 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, + 0x05, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x53, 0x41, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x34, 0x30, + 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x06, 0x22, 0x63, 0x0a, 0x0e, 0x49, + 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x20, 0x0a, + 0x1c, 0x49, 0x4d, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x4a, 0x4f, 0x42, 0x5f, 0x53, 0x54, 0x41, 0x54, + 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x16, 0x0a, 0x12, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x52, + 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, + 0x45, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x45, 0x58, 0x50, 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, + 0x3a, 0x7b, 0xea, 0x41, 0x78, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, + 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, + 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x73, + 0x2f, 0x7b, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6a, 0x6f, 0x62, 0x7d, 0x22, 0x81, 0x01, + 0x0a, 0x1e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x28, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x6b, 0x65, 0x79, + 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x4b, 0x65, 0x79, 0x55, 0x72, 0x69, 0x12, 0x35, 0x0a, 0x17, 0x65, 0x6b, + 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, + 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x65, 0x6b, 0x6d, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x50, 0x61, 0x74, + 0x68, 0x2a, 0x6a, 0x0a, 0x0f, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, + 0x65, 0x76, 0x65, 0x6c, 0x12, 0x20, 0x0a, 0x1c, 0x50, 0x52, 0x4f, 0x54, 0x45, 0x43, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x4c, 0x45, 0x56, 
0x45, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x4f, 0x46, 0x54, 0x57, 0x41, + 0x52, 0x45, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x48, 0x53, 0x4d, 0x10, 0x02, 0x12, 0x0c, 0x0a, + 0x08, 0x45, 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x03, 0x12, 0x10, 0x0a, 0x0c, 0x45, + 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x56, 0x50, 0x43, 0x10, 0x04, 0x42, 0x88, 0x01, + 0x0a, 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x11, 0x4b, 0x6d, 0x73, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x29, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x67, 0x6f, 0x2f, 0x6b, 0x6d, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x6b, 0x6d, + 0x73, 0x70, 0x62, 0x3b, 0x6b, 0x6d, 0x73, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4b, 0x6d, 0x73, 0x2e, + 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, + 0x64, 0x5c, 0x4b, 0x6d, 0x73, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/cloud.google.com/go/kms/apiv1/kmspb/service.pb.go b/vendor/cloud.google.com/go/kms/apiv1/kmspb/service.pb.go index d345c167..0a9f8bb3 100644 --- a/vendor/cloud.google.com/go/kms/apiv1/kmspb/service.pb.go +++ b/vendor/cloud.google.com/go/kms/apiv1/kmspb/service.pb.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc-gen-go v1.30.0 +// protoc v4.23.2 // source: google/cloud/kms/v1/service.proto package kmspb @@ -1285,16 +1285,14 @@ type ImportCryptoKeyVersionRequest struct { // // this field must contain the concatenation of: //
-	//
-	//
-	//   1. An ephemeral AES-256 wrapping key wrapped with the
-	//      [public_key][google.cloud.kms.v1.ImportJob.public_key] using
-	//      RSAES-OAEP with SHA-1/SHA-256, MGF1 with SHA-1/SHA-256, and an empty
-	//      label.
-	//   2.
-	//   3. The formatted key to be imported, wrapped with the ephemeral AES-256
-	//      key using AES-KWP (RFC 5649).
-	//   4.
-	//
+	//   1. An ephemeral AES-256 wrapping key wrapped with the
+	//      [public_key][google.cloud.kms.v1.ImportJob.public_key] using
+	//      RSAES-OAEP with SHA-1/SHA-256, MGF1 with SHA-1/SHA-256, and an empty
+	//      label.
+	//   2.
+	//   3. The formatted key to be imported, wrapped with the ephemeral AES-256
+	//      key using AES-KWP (RFC 5649).
+	//   4.
 	//
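Editorial aside, not part of the upstream patch: the generated comment above describes the CKM_RSA_AES_KEY_WRAP-style blob that ImportCryptoKeyVersion expects in wrapped_key_material, i.e. an RSA-OAEP-wrapped ephemeral AES-256 key immediately followed by the AES-KWP-wrapped target key. The sketch below is a minimal, hedged illustration of how a caller might assemble that blob, assuming an RSA_OAEP_*_SHA256_AES_256-style import method. The kwpWrap parameter is a hypothetical stand-in for an RFC 5649 AES-KWP implementation (the Go standard library does not ship one), and importJobPublicKeyPEM / formattedKey are assumed inputs, not names taken from this patch.

package kmsimportsketch

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"crypto/x509"
	"encoding/pem"
	"errors"
)

// wrapKeyMaterial builds the wrapped_key_material blob: the RSA-OAEP-wrapped
// ephemeral AES-256 key followed immediately by the AES-KWP-wrapped target key.
// kwpWrap must implement AES key wrap with padding (RFC 5649); it is injected
// because the standard library provides no implementation.
func wrapKeyMaterial(importJobPublicKeyPEM, formattedKey []byte,
	kwpWrap func(kek, plaintext []byte) ([]byte, error)) ([]byte, error) {

	// Parse the wrapping public key returned in the import job's public_key.pem.
	block, _ := pem.Decode(importJobPublicKeyPEM)
	if block == nil {
		return nil, errors.New("import job public key: no PEM block found")
	}
	parsed, err := x509.ParsePKIXPublicKey(block.Bytes)
	if err != nil {
		return nil, err
	}
	rsaPub, ok := parsed.(*rsa.PublicKey)
	if !ok {
		return nil, errors.New("import job public key is not an RSA key")
	}

	// Step 1: generate an ephemeral AES-256 key and wrap it with RSAES-OAEP
	// (SHA-256 here, MGF1 with the same hash, empty label).
	ephemeralKey := make([]byte, 32)
	if _, err := rand.Read(ephemeralKey); err != nil {
		return nil, err
	}
	wrappedEphemeral, err := rsa.EncryptOAEP(sha256.New(), rand.Reader, rsaPub, ephemeralKey, nil)
	if err != nil {
		return nil, err
	}

	// Step 2: wrap the formatted key material with the ephemeral key using AES-KWP.
	wrappedTarget, err := kwpWrap(ephemeralKey, formattedKey)
	if err != nil {
		return nil, err
	}

	// The field value is the plain concatenation of the two wrapped blobs.
	return append(wrappedEphemeral, wrappedTarget...), nil
}

In practice the AES-KWP step would come from a vetted cryptography library; the choice is deliberately left open here, and the rest of the patch continues unchanged below.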
// // This format is the same as the format produced by PKCS#11 mechanism @@ -1314,7 +1312,6 @@ type ImportCryptoKeyVersionRequest struct { // instead. // // Types that are assignable to WrappedKeyMaterial: - // // *ImportCryptoKeyVersionRequest_RsaAesWrappedKey WrappedKeyMaterial isImportCryptoKeyVersionRequest_WrappedKeyMaterial `protobuf_oneof:"wrapped_key_material"` } @@ -3350,7 +3347,6 @@ type Digest struct { // Required. The message digest. // // Types that are assignable to Digest: - // // *Digest_Sha256 // *Digest_Sha384 // *Digest_Sha512 @@ -4047,23 +4043,23 @@ var file_google_cloud_kms_v1_service_proto_rawDesc = []byte{ 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, - 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3d, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x2e, 0x12, 0x2c, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, - 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0xb5, 0x01, 0x0a, 0x0e, 0x4c, 0x69, + 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3d, 0xda, 0x41, 0x06, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2e, 0x12, 0x2c, 0x2f, 0x76, 0x31, + 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, + 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x12, 0xb5, 0x01, 0x0a, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4a, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3b, 0x12, 0x39, 0x2f, - 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, - 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x12, 0xde, 0x01, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4a, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3b, 0x12, 0x39, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, + 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 
0x79, + 0x73, 0x12, 0xde, 0x01, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, @@ -4071,119 +4067,119 @@ var file_google_cloud_kms_v1_service_proto_rawDesc = []byte{ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x5e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x4d, 0x2f, 0x76, 0x31, 0x2f, - 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, - 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, - 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, - 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x12, 0xb5, 0x01, 0x0a, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6d, 0x70, 0x6f, 0x72, + 0x73, 0x65, 0x22, 0x5e, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, + 0x93, 0x02, 0x4f, 0x12, 0x4d, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, + 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x7d, + 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0xb5, 0x01, 0x0a, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x73, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4a, - 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3b, 0x12, 0x39, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, - 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, - 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, - 0x73, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x8f, 0x01, 0x0a, 0x0a, 0x47, + 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3b, 0x12, + 0x39, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 
0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, + 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x73, 0x12, 0x8f, 0x01, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x22, - 0x3b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2e, 0x12, 0x2c, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, - 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, - 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xa2, 0x01, 0x0a, + 0x3b, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2e, 0x12, 0x2c, + 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, + 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xa2, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, - 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x22, 0x48, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3b, 0x12, - 0x39, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, - 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, - 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0xcb, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, + 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x22, 0x48, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3b, 0x12, 0x39, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, + 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, + 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, + 0x7d, 0x12, 0xcb, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 
0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x22, 0x5c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x4d, 0x2f, 0x76, 0x31, 0x2f, 0x7b, - 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, - 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, - 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, - 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x6e, 0x22, 0x5c, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x4f, + 0x12, 0x4d, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xc0, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, - 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x22, 0x66, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x59, 0x12, 0x57, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, - 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, - 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, - 0x7d, 0x2f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0xda, 0x41, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0xa2, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, + 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x22, 0x66, 0xda, 0x41, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x59, 0x12, 0x57, 0x2f, 0x76, 0x31, 0x2f, 0x7b, + 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, + 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, + 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, + 0x65, 0x79, 0x12, 0xa2, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 
0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, - 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x22, 0x48, 0x82, - 0xd3, 0xe4, 0x93, 0x02, 0x3b, 0x12, 0x39, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, - 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, - 0x2f, 0x2a, 0x2f, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x73, 0x2f, 0x2a, 0x7d, - 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xb6, 0x01, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, + 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x22, 0x48, 0xda, + 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3b, 0x12, 0x39, 0x2f, 0x76, + 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, + 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, + 0x4a, 0x6f, 0x62, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xb6, 0x01, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x69, - 0x6e, 0x67, 0x22, 0x5c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x22, 0x2c, 0x2f, 0x76, 0x31, 0x2f, - 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, - 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x3a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, - 0x6e, 0x67, 0xda, 0x41, 0x1b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x6b, 0x65, 0x79, 0x5f, - 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x69, 0x64, 0x2c, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, + 0x6e, 0x67, 0x22, 0x5c, 0xda, 0x41, 0x1b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x6b, 0x65, + 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x69, 0x64, 0x2c, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, + 0x6e, 0x67, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x3a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, + 0x6e, 0x67, 0x22, 0x2c, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x12, 0xcf, 0x01, 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, - 0x79, 
0x22, 0x6f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x47, 0x22, 0x39, 0x2f, 0x76, 0x31, 0x2f, 0x7b, - 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, - 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, - 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, - 0x4b, 0x65, 0x79, 0x73, 0x3a, 0x0a, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, - 0xda, 0x41, 0x1f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, - 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x64, 0x2c, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, - 0x65, 0x79, 0x12, 0xfb, 0x01, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, + 0x79, 0x22, 0x6f, 0xda, 0x41, 0x1f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x64, 0x2c, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x47, 0x3a, 0x0a, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x22, 0x39, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, + 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, + 0x79, 0x73, 0x12, 0xfb, 0x01, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, - 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x85, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, - 0x63, 0x22, 0x4d, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, + 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x85, 0x01, 0xda, 0x41, 0x19, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x63, 0x3a, 0x12, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x22, 0x4d, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x3a, 0x12, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0xda, 0x41, 0x19, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0xd4, 0x01, 0x0a, 0x16, 0x49, 0x6d, 0x70, 0x6f, 
0x72, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, @@ -4191,39 +4187,39 @@ var file_google_cloud_kms_v1_service_proto_rawDesc = []byte{ 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x5f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x59, 0x22, 0x54, - 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, - 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x69, 0x6d, - 0x70, 0x6f, 0x72, 0x74, 0x3a, 0x01, 0x2a, 0x12, 0xcf, 0x01, 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x61, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x5f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x59, 0x3a, 0x01, + 0x2a, 0x22, 0x54, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, + 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x3a, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x12, 0xcf, 0x01, 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, - 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x22, 0x6f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x47, - 0x22, 0x39, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x7d, - 0x2f, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x73, 0x3a, 0x0a, 0x69, 0x6d, 0x70, - 0x6f, 0x72, 0x74, 0x5f, 0x6a, 0x6f, 0x62, 0xda, 0x41, 0x1f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x2c, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6a, 0x6f, 0x62, 0x5f, 0x69, 0x64, 0x2c, 0x69, - 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6a, 0x6f, 0x62, 0x12, 0xd1, 0x01, 0x0a, 0x0f, 0x55, 0x70, + 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x22, 0x6f, 0xda, 0x41, 0x1f, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x2c, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6a, 0x6f, 0x62, 0x5f, 0x69, + 0x64, 0x2c, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6a, 0x6f, 0x62, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x47, 
0x3a, 0x0a, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6a, 0x6f, 0x62, 0x22, 0x39, + 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x69, + 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x73, 0x12, 0xd1, 0x01, 0x0a, 0x0f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, - 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x22, 0x71, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x52, 0x32, 0x44, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, - 0x6b, 0x65, 0x79, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, - 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, - 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x0a, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, - 0x5f, 0x6b, 0x65, 0x79, 0xda, 0x41, 0x16, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, - 0x79, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x12, 0x93, 0x02, + 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x22, 0x71, 0xda, 0x41, 0x16, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x52, 0x3a, 0x0a, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x32, 0x44, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x93, 0x02, 0x0a, 0x16, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x55, @@ -4231,17 +4227,17 @@ var file_google_cloud_kms_v1_service_proto_rawDesc = []byte{ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x22, 0x9d, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x76, 0x32, 0x60, 0x2f, 0x76, - 0x31, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 
0x6f, 0x6e, 0x73, 0x2f, - 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, - 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, - 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x12, - 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0xda, 0x41, 0x1e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, - 0x61, 0x73, 0x6b, 0x12, 0xf2, 0x01, 0x0a, 0x1d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x72, + 0x69, 0x6f, 0x6e, 0x22, 0x9d, 0x01, 0xda, 0x41, 0x1e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, + 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2c, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x76, 0x3a, 0x12, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x32, 0x60, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, + 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, + 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x2f, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x2a, 0x7d, 0x12, 0xf2, 0x01, 0x0a, 0x1d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, @@ -4249,14 +4245,14 @@ var file_google_cloud_kms_v1_service_proto_rawDesc = []byte{ 0x72, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, - 0x22, 0x76, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x53, 0x22, 0x4e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, + 0x22, 0x76, 0xda, 0x41, 0x1a, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, + 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x53, 0x3a, 0x01, 0x2a, 0x22, 0x4e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, - 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x1a, 0x6e, 0x61, - 0x6d, 0x65, 0x2c, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x12, 0xde, 0x01, 0x0a, 0x17, 0x44, 0x65, 0x73, + 0x79, 0x56, 0x65, 0x72, 0x73, 
0x69, 0x6f, 0x6e, 0x12, 0xde, 0x01, 0x0a, 0x17, 0x44, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x74, 0x72, @@ -4264,13 +4260,13 @@ var file_google_cloud_kms_v1_service_proto_rawDesc = []byte{ 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x22, 0x67, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x5a, 0x22, 0x55, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, - 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, - 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, - 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, - 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x64, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, 0x3a, - 0x01, 0x2a, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xde, 0x01, 0x0a, 0x17, 0x52, 0x65, + 0x22, 0x67, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x5a, 0x3a, + 0x01, 0x2a, 0x22, 0x55, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, + 0x7d, 0x3a, 0x64, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, 0x12, 0xde, 0x01, 0x0a, 0x17, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x74, @@ -4278,50 +4274,50 @@ var file_google_cloud_kms_v1_service_proto_rawDesc = []byte{ 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x22, 0x67, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x5a, 0x22, 0x55, 0x2f, 0x76, 0x31, 0x2f, 0x7b, - 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, - 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, - 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, - 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, - 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xb4, 0x01, 0x0a, 0x07, 0x45, + 0x6e, 0x22, 0x67, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x5a, + 0x3a, 
0x01, 0x2a, 0x22, 0x55, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, + 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x2a, 0x7d, 0x3a, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x12, 0xb4, 0x01, 0x0a, 0x07, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x5e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x47, 0x22, 0x42, 0x2f, 0x76, 0x31, 0x2f, 0x7b, - 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, - 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, - 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, - 0x73, 0x2f, 0x2a, 0x2a, 0x7d, 0x3a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x3a, 0x01, 0x2a, - 0xda, 0x41, 0x0e, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, + 0x65, 0x22, 0x5e, 0xda, 0x41, 0x0e, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x70, 0x6c, 0x61, 0x69, 0x6e, + 0x74, 0x65, 0x78, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x47, 0x3a, 0x01, 0x2a, 0x22, 0x42, 0x2f, + 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, + 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x2a, 0x7d, 0x3a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x12, 0xb4, 0x01, 0x0a, 0x07, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x46, - 0x22, 0x41, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x64, 0x65, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x0f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x63, 0x69, - 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x12, 0xe0, 0x01, 0x0a, 0x0e, 0x41, 0x73, 0x79, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5e, 0xda, 0x41, 0x0f, 0x6e, 0x61, 0x6d, + 0x65, 0x2c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 
0x74, 0x65, 0x78, 0x74, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x46, 0x3a, 0x01, 0x2a, 0x22, 0x41, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, + 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, + 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x7d, + 0x3a, 0x64, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x12, 0xe0, 0x01, 0x0a, 0x0e, 0x41, 0x73, 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x73, 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x73, 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x75, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x61, 0x22, 0x5c, 0x2f, 0x76, - 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, - 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, - 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x61, 0x73, 0x79, 0x6d, - 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x0b, - 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0xf0, 0x01, 0x0a, 0x11, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x75, 0xda, 0x41, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x64, 0x69, + 0x67, 0x65, 0x73, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x61, 0x3a, 0x01, 0x2a, 0x22, 0x5c, 0x2f, + 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, + 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, + 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x61, 0x73, 0x79, + 0x6d, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x12, 0xf0, 0x01, 0x0a, 0x11, 0x41, 0x73, 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x73, 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, @@ -4329,40 +4325,40 @@ var file_google_cloud_kms_v1_service_proto_rawDesc = []byte{ 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x73, 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x7c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x64, 0x22, 0x5f, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, - 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, - 0x6f, 0x63, 
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, - 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, - 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x61, 0x73, 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x0f, 0x6e, - 0x61, 0x6d, 0x65, 0x2c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x12, 0xc2, + 0x22, 0x7c, 0xda, 0x41, 0x0f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, + 0x74, 0x65, 0x78, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x64, 0x3a, 0x01, 0x2a, 0x22, 0x5f, 0x2f, + 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, + 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, + 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x61, 0x73, 0x79, + 0x6d, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x12, 0xc2, 0x01, 0x0a, 0x07, 0x4d, 0x61, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x5a, 0x22, 0x55, 0x2f, - 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, - 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, - 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, - 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, 0x61, 0x63, - 0x53, 0x69, 0x67, 0x6e, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x64, - 0x61, 0x74, 0x61, 0x12, 0xce, 0x01, 0x0a, 0x09, 0x4d, 0x61, 0x63, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6c, 0xda, 0x41, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x64, + 0x61, 0x74, 0x61, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x5a, 0x3a, 0x01, 0x2a, 0x22, 0x55, 0x2f, 0x76, + 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, + 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, + 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, 0x61, 0x63, 0x53, + 0x69, 0x67, 0x6e, 0x12, 0xce, 0x01, 0x0a, 0x09, 0x4d, 0x61, 0x63, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 
0x4d, 0x61, 0x63, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x63, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x72, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x5c, 0x22, 0x57, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, - 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, - 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, - 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, - 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, 0x61, 0x63, 0x56, 0x65, 0x72, 0x69, 0x66, - 0x79, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x0d, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x64, 0x61, 0x74, 0x61, - 0x2c, 0x6d, 0x61, 0x63, 0x12, 0xe7, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x22, 0x72, 0xda, 0x41, 0x0d, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x64, 0x61, 0x74, 0x61, 0x2c, 0x6d, + 0x61, 0x63, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x5c, 0x3a, 0x01, 0x2a, 0x22, 0x57, 0x2f, 0x76, 0x31, + 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, + 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, + 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, 0x61, 0x63, 0x56, 0x65, + 0x72, 0x69, 0x66, 0x79, 0x12, 0xe7, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x61, 0x6e, 0x64, 0x6f, @@ -4370,13 +4366,13 @@ var file_google_cloud_kms_v1_service_proto_rawDesc = []byte{ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x6d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3e, 0x22, 0x39, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6c, 0x6f, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, - 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x42, 0x79, 0x74, - 0x65, 0x73, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x26, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x2c, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x2c, 0x70, 0x72, - 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x1a, 0x74, + 0x6d, 0xda, 0x41, 0x26, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x6c, 0x65, 0x6e, + 0x67, 0x74, 0x68, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x2c, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3e, + 0x3a, 
0x01, 0x2a, 0x22, 0x39, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x52, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x42, 0x79, 0x74, 0x65, 0x73, 0x1a, 0x74, 0xca, 0x41, 0x17, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x57, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, @@ -4384,16 +4380,15 @@ var file_google_cloud_kms_v1_service_proto_rawDesc = []byte{ 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, - 0x64, 0x6b, 0x6d, 0x73, 0x42, 0x8c, 0x01, 0x0a, 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, - 0x42, 0x08, 0x4b, 0x6d, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x36, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, - 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x6b, 0x6d, 0x73, 0x2f, 0x76, 0x31, - 0x3b, 0x6b, 0x6d, 0x73, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4b, 0x6d, 0x73, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, - 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4b, 0x6d, 0x73, - 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x64, 0x6b, 0x6d, 0x73, 0x42, 0x7f, 0x0a, 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x42, + 0x08, 0x4b, 0x6d, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x29, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, + 0x2f, 0x6b, 0x6d, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x6b, 0x6d, 0x73, 0x70, 0x62, + 0x3b, 0x6b, 0x6d, 0x73, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4b, 0x6d, 0x73, 0x2e, 0x56, 0x31, 0xca, + 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4b, + 0x6d, 0x73, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/cloud.google.com/go/kms/internal/version.go b/vendor/cloud.google.com/go/kms/internal/version.go index 0ad9373b..5ac4a843 100644 --- a/vendor/cloud.google.com/go/kms/internal/version.go +++ b/vendor/cloud.google.com/go/kms/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. 
-const Version = "1.8.0" +const Version = "1.12.1" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md new file mode 100644 index 00000000..c4da5d78 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md @@ -0,0 +1,572 @@ +# Release History + +## 1.6.1 (2023-06-06) + +### Bugs Fixed +* Retry policy always clones the underlying `*http.Request` before invoking the next policy. +* Added some non-standard error codes to the list of error codes for unregistered resource providers. +* Fixed an issue in `azcore.NewClient()` and `arm.NewClient()` that could cause an incorrect module name to be used in telemetry. + +## 1.6.0 (2023-05-04) + +### Features Added +* Added support for ARM cross-tenant authentication. Set the `AuxiliaryTenants` field of `arm.ClientOptions` to enable. +* Added `TenantID` field to `policy.TokenRequestOptions`. + +## 1.5.0 (2023-04-06) + +### Features Added +* Added `ShouldRetry` to `policy.RetryOptions` for finer-grained control over when to retry. + +### Breaking Changes +> These changes affect only code written against a beta version such as v1.5.0-beta.1 +> These features will return in v1.6.0-beta.1. +* Removed `TokenRequestOptions.Claims` and `.TenantID` +* Removed ARM client support for CAE and cross-tenant auth. + +### Bugs Fixed +* Added non-conformant LRO terminal states `Cancelled` and `Completed`. + +### Other Changes +* Updated to latest `internal` module. + +## 1.5.0-beta.1 (2023-03-02) + +### Features Added +* This release includes the features added in v1.4.0-beta.1 + +## 1.4.0 (2023-03-02) +> This release doesn't include features added in v1.4.0-beta.1. They will return in v1.5.0-beta.1. + +### Features Added +* Add `Clone()` method for `arm/policy.ClientOptions`. + +### Bugs Fixed +* ARM's RP registration policy will no longer swallow unrecognized errors. +* Fixed an issue in `runtime.NewPollerFromResumeToken()` when resuming a `Poller` with a custom `PollingHandler`. +* Fixed wrong policy copy in `arm/runtime.NewPipeline()`. + +## 1.4.0-beta.1 (2023-02-02) + +### Features Added +* Added support for ARM cross-tenant authentication. Set the `AuxiliaryTenants` field of `arm.ClientOptions` to enable. +* Added `Claims` and `TenantID` fields to `policy.TokenRequestOptions`. +* ARM bearer token policy handles CAE challenges. + +## 1.3.1 (2023-02-02) + +### Other Changes +* Update dependencies to latest versions. + +## 1.3.0 (2023-01-06) + +### Features Added +* Added `BearerTokenOptions.AuthorizationHandler` to enable extending `runtime.BearerTokenPolicy` + with custom authorization logic +* Added `Client` types and matching constructors to the `azcore` and `arm` packages. These represent a basic client for HTTP and ARM respectively. + +### Other Changes +* Updated `internal` module to latest version. +* `policy/Request.SetBody()` allows replacing a request's body with an empty one + +## 1.2.0 (2022-11-04) + +### Features Added +* Added `ClientOptions.APIVersion` field, which overrides the default version a client + requests of the service, if the client supports this (all ARM clients do). +* Added package `tracing` that contains the building blocks for distributed tracing. +* Added field `TracingProvider` to type `policy.ClientOptions` that will be used to set the per-client tracing implementation. + +### Bugs Fixed +* Fixed an issue in `runtime.SetMultipartFormData` to properly handle slices of `io.ReadSeekCloser`. 
+* Fixed the MaxRetryDelay default to be 60s. +* Failure to poll the state of an LRO will now return an `*azcore.ResponseError` for poller types that require this behavior. +* Fixed a bug in `runtime.NewPipeline` that would cause pipeline-specified allowed headers and query parameters to be lost. + +### Other Changes +* Retain contents of read-only fields when sending requests. + +## 1.1.4 (2022-10-06) + +### Bugs Fixed +* Don't retry a request if the `Retry-After` delay is greater than the configured `RetryOptions.MaxRetryDelay`. +* `runtime.JoinPaths`: do not unconditionally add a forward slash before the query string + +### Other Changes +* Removed logging URL from retry policy as it's redundant. +* Retry policy logs when it exits due to a non-retriable status code. + +## 1.1.3 (2022-09-01) + +### Bugs Fixed +* Adjusted the initial retry delay to 800ms per the Azure SDK guidelines. + +## 1.1.2 (2022-08-09) + +### Other Changes +* Fixed various doc bugs. + +## 1.1.1 (2022-06-30) + +### Bugs Fixed +* Avoid polling when a RELO LRO synchronously terminates. + +## 1.1.0 (2022-06-03) + +### Other Changes +* The one-second floor for `Frequency` when calling `PollUntilDone()` has been removed when running tests. + +## 1.0.0 (2022-05-12) + +### Features Added +* Added interface `runtime.PollingHandler` to support custom poller implementations. + * Added field `PollingHandler` of this type to `runtime.NewPollerOptions[T]` and `runtime.NewPollerFromResumeTokenOptions[T]`. + +### Breaking Changes +* Renamed `cloud.Configuration.LoginEndpoint` to `.ActiveDirectoryAuthorityHost` +* Renamed `cloud.AzurePublicCloud` to `cloud.AzurePublic` +* Removed `AuxiliaryTenants` field from `arm/ClientOptions` and `arm/policy/BearerTokenOptions` +* Removed `TokenRequestOptions.TenantID` +* `Poller[T].PollUntilDone()` now takes an `options *PollUntilDoneOptions` param instead of `freq time.Duration` +* Removed `arm/runtime.Poller[T]`, `arm/runtime.NewPoller[T]()` and `arm/runtime.NewPollerFromResumeToken[T]()` +* Removed `arm/runtime.FinalStateVia` and related `const` values +* Renamed `runtime.PageProcessor` to `runtime.PagingHandler` +* The `arm/runtime.ProviderRepsonse` and `arm/runtime.Provider` types are no longer exported. +* Renamed `NewRequestIdPolicy()` to `NewRequestIDPolicy()` +* `TokenCredential.GetToken` now returns `AccessToken` by value. + +### Bugs Fixed +* When per-try timeouts are enabled, only cancel the context after the body has been read and closed. +* The `Operation-Location` poller now properly handles `final-state-via` values. +* Improvements in `runtime.Poller[T]` + * `Poll()` shouldn't cache errors, allowing for additional retries when in a non-terminal state. + * `Result()` will cache the terminal result or error but not transient errors, allowing for additional retries. + +### Other Changes +* Updated to latest `internal` module and absorbed breaking changes. + * Use `temporal.Resource` and deleted copy. +* The internal poller implementation has been refactored. + * The implementation in `internal/pollers/poller.go` has been merged into `runtime/poller.go` with some slight modification. + * The internal poller types had their methods updated to conform to the `runtime.PollingHandler` interface. + * The creation of resume tokens has been refactored so that implementers of `runtime.PollingHandler` don't need to know about it. 
+* `NewPipeline()` places policies from `ClientOptions` after policies from `PipelineOptions` +* Default User-Agent headers no longer include `azcore` version information + +## 0.23.1 (2022-04-14) + +### Bugs Fixed +* Include XML header when marshalling XML content. +* Handle XML namespaces when searching for error code. +* Handle `odata.error` when searching for error code. + +## 0.23.0 (2022-04-04) + +### Features Added +* Added `runtime.Pager[T any]` and `runtime.Poller[T any]` supporting types for central, generic, implementations. +* Added `cloud` package with a new API for cloud configuration +* Added `FinalStateVia` field to `runtime.NewPollerOptions[T any]` type. + +### Breaking Changes +* Removed the `Poller` type-alias to the internal poller implementation. +* Added `Ptr[T any]` and `SliceOfPtrs[T any]` in the `to` package and removed all non-generic implementations. +* `NullValue` and `IsNullValue` now take a generic type parameter instead of an interface func parameter. +* Replaced `arm.Endpoint` with `cloud` API + * Removed the `endpoint` parameter from `NewRPRegistrationPolicy()` + * `arm/runtime.NewPipeline()` and `.NewRPRegistrationPolicy()` now return an `error` +* Refactored `NewPoller` and `NewPollerFromResumeToken` funcs in `arm/runtime` and `runtime` packages. + * Removed the `pollerID` parameter as it's no longer required. + * Created optional parameter structs and moved optional parameters into them. +* Changed `FinalStateVia` field to a `const` type. + +### Other Changes +* Converted expiring resource and dependent types to use generics. + +## 0.22.0 (2022-03-03) + +### Features Added +* Added header `WWW-Authenticate` to the default allow-list of headers for logging. +* Added a pipeline policy that enables the retrieval of HTTP responses from API calls. + * Added `runtime.WithCaptureResponse` to enable the policy at the API level (off by default). + +### Breaking Changes +* Moved `WithHTTPHeader` and `WithRetryOptions` from the `policy` package to the `runtime` package. + +## 0.21.1 (2022-02-04) + +### Bugs Fixed +* Restore response body after reading in `Poller.FinalResponse()`. (#16911) +* Fixed bug in `NullValue` that could lead to incorrect comparisons for empty maps/slices (#16969) + +### Other Changes +* `BearerTokenPolicy` is more resilient to transient authentication failures. (#16789) + +## 0.21.0 (2022-01-11) + +### Features Added +* Added `AllowedHeaders` and `AllowedQueryParams` to `policy.LogOptions` to control which headers and query parameters are written to the logger. +* Added `azcore.ResponseError` type which is returned from APIs when a non-success HTTP status code is received. 
+ +### Breaking Changes +* Moved `[]policy.Policy` parameters of `arm/runtime.NewPipeline` and `runtime.NewPipeline` into a new struct, `runtime.PipelineOptions` +* Renamed `arm/ClientOptions.Host` to `.Endpoint` +* Moved `Request.SkipBodyDownload` method to function `runtime.SkipBodyDownload` +* Removed `azcore.HTTPResponse` interface type +* `arm.NewPoller()` and `runtime.NewPoller()` no longer require an `eu` parameter +* `runtime.NewResponseError()` no longer requires an `error` parameter + +## 0.20.0 (2021-10-22) + +### Breaking Changes +* Removed `arm.Connection` +* Removed `azcore.Credential` and `.NewAnonymousCredential()` + * `NewRPRegistrationPolicy` now requires an `azcore.TokenCredential` +* `runtime.NewPipeline` has a new signature that simplifies implementing custom authentication +* `arm/runtime.RegistrationOptions` embeds `policy.ClientOptions` +* Contents in the `log` package have been slightly renamed. +* Removed `AuthenticationOptions` in favor of `policy.BearerTokenOptions` +* Changed parameters for `NewBearerTokenPolicy()` +* Moved policy config options out of `arm/runtime` and into `arm/policy` + +### Features Added +* Updating Documentation +* Added string typdef `arm.Endpoint` to provide a hint toward expected ARM client endpoints +* `azcore.ClientOptions` contains common pipeline configuration settings +* Added support for multi-tenant authorization in `arm/runtime` +* Require one second minimum when calling `PollUntilDone()` + +### Bug Fixes +* Fixed a potential panic when creating the default Transporter. +* Close LRO initial response body when creating a poller. +* Fixed a panic when recursively cloning structs that contain time.Time. + +## 0.19.0 (2021-08-25) + +### Breaking Changes +* Split content out of `azcore` into various packages. The intent is to separate content based on its usage (common, uncommon, SDK authors). + * `azcore` has all core functionality. + * `log` contains facilities for configuring in-box logging. + * `policy` is used for configuring pipeline options and creating custom pipeline policies. + * `runtime` contains various helpers used by SDK authors and generated content. + * `streaming` has helpers for streaming IO operations. +* `NewTelemetryPolicy()` now requires module and version parameters and the `Value` option has been removed. + * As a result, the `Request.Telemetry()` method has been removed. +* The telemetry policy now includes the SDK prefix `azsdk-go-` so callers no longer need to provide it. +* The `*http.Request` in `runtime.Request` is no longer anonymously embedded. Use the `Raw()` method to access it. +* The `UserAgent` and `Version` constants have been made internal, `Module` and `Version` respectively. + +### Bug Fixes +* Fixed an issue in the retry policy where the request body could be overwritten after a rewind. + +### Other Changes +* Moved modules `armcore` and `to` content into `arm` and `to` packages respectively. + * The `Pipeline()` method on `armcore.Connection` has been replaced by `NewPipeline()` in `arm.Connection`. It takes module and version parameters used by the telemetry policy. +* Poller logic has been consolidated across ARM and core implementations. + * This required some changes to the internal interfaces for core pollers. +* The core poller types have been improved, including more logging and test coverage. 
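The 0.21.0 entries above add `azcore.ResponseError` plus the `AllowedHeaders`/`AllowedQueryParams` allow-lists on `policy.LogOptions`. A minimal, editor-added sketch of how a caller might consume both against the current `azcore` surface follows; the custom header name is a hypothetical example, not part of the SDK.

```go
package example

import (
	"errors"
	"fmt"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)

// clientOptions opts additional values into the log output; headers and query
// parameters not on an allow-list are still redacted by the logging policy.
var clientOptions = azcore.ClientOptions{
	Logging: policy.LogOptions{
		AllowedHeaders:     []string{"x-example-correlation"}, // hypothetical custom header
		AllowedQueryParams: []string{"api-version"},
	},
}

// classify unwraps the *azcore.ResponseError that clients return for
// non-success HTTP status codes.
func classify(err error) {
	var respErr *azcore.ResponseError
	if errors.As(err, &respErr) {
		fmt.Printf("service error: status=%d code=%s\n", respErr.StatusCode, respErr.ErrorCode)
		if respErr.StatusCode == http.StatusNotFound {
			// resource is missing; create it or surface a domain-specific error
		}
		return
	}
	fmt.Println("transport or client-side error:", err)
}
```

Opting a header into the allow-list is an explicit decision about what may appear in diagnostics; everything else is logged in redacted form.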
+ +## 0.18.1 (2021-08-20) + +### Features Added +* Adds an `ETag` type for comparing etags and handling etags on requests +* Simplifies the `requestBodyProgess` and `responseBodyProgress` into a single `progress` object + +### Bugs Fixed +* `JoinPaths` will preserve query parameters encoded in the `root` url. + +### Other Changes +* Bumps dependency on `internal` module to the latest version (v0.7.0) + +## 0.18.0 (2021-07-29) +### Features Added +* Replaces methods from Logger type with two package methods for interacting with the logging functionality. +* `azcore.SetClassifications` replaces `azcore.Logger().SetClassifications` +* `azcore.SetListener` replaces `azcore.Logger().SetListener` + +### Breaking Changes +* Removes `Logger` type from `azcore` + + +## 0.17.0 (2021-07-27) +### Features Added +* Adding TenantID to TokenRequestOptions (https://github.com/Azure/azure-sdk-for-go/pull/14879) +* Adding AuxiliaryTenants to AuthenticationOptions (https://github.com/Azure/azure-sdk-for-go/pull/15123) + +### Breaking Changes +* Rename `AnonymousCredential` to `NewAnonymousCredential` (https://github.com/Azure/azure-sdk-for-go/pull/15104) +* rename `AuthenticationPolicyOptions` to `AuthenticationOptions` (https://github.com/Azure/azure-sdk-for-go/pull/15103) +* Make Header constants private (https://github.com/Azure/azure-sdk-for-go/pull/15038) + + +## 0.16.2 (2021-05-26) +### Features Added +* Improved support for byte arrays [#14715](https://github.com/Azure/azure-sdk-for-go/pull/14715) + + +## 0.16.1 (2021-05-19) +### Features Added +* Add license.txt to azcore module [#14682](https://github.com/Azure/azure-sdk-for-go/pull/14682) + + +## 0.16.0 (2021-05-07) +### Features Added +* Remove extra `*` in UnmarshalAsByteArray() [#14642](https://github.com/Azure/azure-sdk-for-go/pull/14642) + + +## 0.15.1 (2021-05-06) +### Features Added +* Cache the original request body on Request [#14634](https://github.com/Azure/azure-sdk-for-go/pull/14634) + + +## 0.15.0 (2021-05-05) +### Features Added +* Add support for null map and slice +* Export `Response.Payload` method + +### Breaking Changes +* remove `Response.UnmarshalError` as it's no longer required + + +## 0.14.5 (2021-04-23) +### Features Added +* Add `UnmarshalError()` on `azcore.Response` + + +## 0.14.4 (2021-04-22) +### Features Added +* Support for basic LRO polling +* Added type `LROPoller` and supporting types for basic polling on long running operations. +* rename poller param and added doc comment + +### Bugs Fixed +* Fixed content type detection bug in logging. + + +## 0.14.3 (2021-03-29) +### Features Added +* Add support for multi-part form data +* Added method `WriteMultipartFormData()` to Request. + + +## 0.14.2 (2021-03-17) +### Features Added +* Add support for encoding JSON null values +* Adds `NullValue()` and `IsNullValue()` functions for setting and detecting sentinel values used for encoding a JSON null. +* Documentation fixes + +### Bugs Fixed +* Fixed improper error wrapping + + +## 0.14.1 (2021-02-08) +### Features Added +* Add `Pager` and `Poller` interfaces to azcore + + +## 0.14.0 (2021-01-12) +### Features Added +* Accept zero-value options for default values +* Specify zero-value options structs to accept default values. +* Remove `DefaultXxxOptions()` methods. 
+* Do not silently change TryTimeout on negative values +* make per-try timeout opt-in + + +## 0.13.4 (2020-11-20) +### Features Added +* Include telemetry string in User Agent + + +## 0.13.3 (2020-11-20) +### Features Added +* Updating response body handling on `azcore.Response` + + +## 0.13.2 (2020-11-13) +### Features Added +* Remove implementation of stateless policies as first-class functions. + + +## 0.13.1 (2020-11-05) +### Features Added +* Add `Telemetry()` method to `azcore.Request()` + + +## 0.13.0 (2020-10-14) +### Features Added +* Rename `log` to `logger` to avoid name collision with the log package. +* Documentation improvements +* Simplified `DefaultHTTPClientTransport()` implementation + + +## 0.12.1 (2020-10-13) +### Features Added +* Update `internal` module dependence to `v0.5.0` + + +## 0.12.0 (2020-10-08) +### Features Added +* Removed storage specific content +* Removed internal content to prevent API clutter +* Refactored various policy options to conform with our options pattern + + +## 0.11.0 (2020-09-22) +### Features Added + +* Removed `LogError` and `LogSlowResponse`. +* Renamed `options` in `RequestLogOptions`. +* Updated `NewRequestLogPolicy()` to follow standard pattern for options. +* Refactored `requestLogPolicy.Do()` per above changes. +* Cleaned up/added logging in retry policy. +* Export `NewResponseError()` +* Fix `RequestLogOptions` comment + + +## 0.10.1 (2020-09-17) +### Features Added +* Add default console logger +* Default console logger writes to stderr. To enable it, set env var `AZURE_SDK_GO_LOGGING` to the value 'all'. +* Added `Logger.Writef()` to reduce the need for `ShouldLog()` checks. +* Add `LogLongRunningOperation` + + +## 0.10.0 (2020-09-10) +### Features Added +* The `request` and `transport` interfaces have been refactored to align with the patterns in the standard library. +* `NewRequest()` now uses `http.NewRequestWithContext()` and performs additional validation, it also requires a context parameter. +* The `Policy` and `Transport` interfaces have had their context parameter removed as the context is associated with the underlying `http.Request`. +* `Pipeline.Do()` will validate the HTTP request before sending it through the pipeline, avoiding retries on a malformed request. +* The `Retrier` interface has been replaced with the `NonRetriableError` interface, and the retry policy updated to test for this. +* `Request.SetBody()` now requires a content type parameter for setting the request's MIME type. +* moved path concatenation into `JoinPaths()` func + + +## 0.9.6 (2020-08-18) +### Features Added +* Improvements to body download policy +* Always download the response body for error responses, i.e. HTTP status codes >= 400. +* Simplify variable declarations + + +## 0.9.5 (2020-08-11) +### Features Added +* Set the Content-Length header in `Request.SetBody` + + +## 0.9.4 (2020-08-03) +### Features Added +* Fix cancellation of per try timeout +* Per try timeout is used to ensure that an HTTP operation doesn't take too long, e.g. that a GET on some URL doesn't take an inordinant amount of time. +* Once the HTTP request returns, the per try timeout should be cancelled, not when the response has been read to completion. 
+* Do not drain response body if there are no more retries +* Do not retry non-idempotent operations when body download fails + + +## 0.9.3 (2020-07-28) ### Features Added +* Add support for custom HTTP request headers +* Inserts an internal policy into the pipeline that can extract HTTP header values from the caller's context, adding them to the request. +* Use `azcore.WithHTTPHeader` to add HTTP headers to a context. +* Remove method specific to Go 1.14 + + +## 0.9.2 (2020-07-28) ### Features Added +* Omit read-only content from request payloads +* If any field in a payload's object graph contains `azure:"ro"`, make a clone of the object graph, omitting all fields with this annotation. +* Verify no fields were dropped +* Handle embedded struct types +* Added test for cloning by value +* Add messages to failures + + +## 0.9.1 (2020-07-22) ### Features Added +* Updated dependency on internal module to fix race condition. + + +## 0.9.0 (2020-07-09) ### Features Added +* Add `HTTPResponse` interface to be used by callers to access the raw HTTP response from an error in the event of an API call failure. +* Updated `sdk/internal` dependency to latest version. +* Rename package alias + + +## 0.8.2 (2020-06-29) ### Features Added +* Added missing documentation comments + +### Bugs Fixed +* Fixed a bug in body download policy. + + +## 0.8.1 (2020-06-26) ### Features Added +* Miscellaneous clean-up reported by linters + + +## 0.8.0 (2020-06-01) ### Features Added +* Differentiate between standard and URL encoding. + + +## 0.7.1 (2020-05-27) ### Features Added +* Add support for base64 encoding and decoding of payloads. + + +## 0.7.0 (2020-05-12) ### Features Added +* Change `RetryAfter()` to a function. + + +## 0.6.0 (2020-04-29) ### Features Added +* Updating `RetryAfter` to only return the detail in the RetryAfter header + + +## 0.5.0 (2020-03-23) ### Features Added +* Export `TransportFunc` + +### Breaking Changes +* Removed `IterationDone` + + +## 0.4.1 (2020-02-25) ### Features Added +* Ensure per-try timeout is properly cancelled +* Explicitly call cancel the per-try timeout when the response body has been read/closed by the body download policy. +* When the response body is returned to the caller for reading/closing, wrap it in a `responseBodyReader` that will cancel the timeout when the body is closed. +* `Logger.Should()` will return false if no listener is set. + + +## 0.4.0 (2020-02-18) ### Features Added +* Enable custom `RetryOptions` to be specified per API call +* Added `WithRetryOptions()` that adds a custom `RetryOptions` to the provided context, allowing custom settings per API call. +* Remove 429 from the list of default HTTP status codes for retry. +* Change StatusCodesForRetry to a slice so consumers can append to it. +* Added support for retry-after in HTTP-date format. +* Cleaned up some comments specific to storage. +* Remove `Request.SetQueryParam()` +* Renamed `MaxTries` to `MaxRetries` + +## 0.3.0 (2020-01-16) ### Features Added +* Added `DefaultRetryOptions` to create initialized default options.
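The per-call configuration described in the 0.3.0/0.4.0 retry entries and the 0.9.3 custom-header entry above now lives in the `runtime` package (the helpers were moved out of `policy` in 0.22.0). A hedged sketch of decorating a context for a single operation, assuming a caller that wants tighter retries and one extra header; the header name is a placeholder.

```go
package example

import (
	"context"
	"net/http"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// withPerCallOverrides returns a context that makes a single API call use a
// custom retry configuration and carry one extra HTTP header, leaving the
// client-wide options untouched.
func withPerCallOverrides(ctx context.Context) context.Context {
	ctx = runtime.WithRetryOptions(ctx, policy.RetryOptions{
		MaxRetries:    2,
		TryTimeout:    30 * time.Second,
		RetryDelay:    time.Second,
		MaxRetryDelay: 20 * time.Second,
	})
	return runtime.WithHTTPHeader(ctx, http.Header{
		"x-example-correlation": []string{"request-42"}, // placeholder header
	})
}
```

Any client call made with the returned context picks up these values through the retry and header policies that `runtime.NewPipeline` installs by default.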
+ +### Breaking Changes +* Removed `Response.CheckStatusCode()` + + +## 0.2.0 (2020-01-15) +### Features Added +* Add support for marshalling and unmarshalling JSON +* Removed `Response.Payload` field +* Exit early when unmarsahlling if there is no payload + + +## 0.1.0 (2020-01-10) +### Features Added +* Initial release diff --git a/vendor/github.com/mattn/go-colorable/LICENSE b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/LICENSE.txt similarity index 93% rename from vendor/github.com/mattn/go-colorable/LICENSE rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/LICENSE.txt index 91b5cef3..48ea6616 100644 --- a/vendor/github.com/mattn/go-colorable/LICENSE +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/LICENSE.txt @@ -1,6 +1,6 @@ -The MIT License (MIT) +MIT License -Copyright (c) 2016 Yasuhiro Matsumoto +Copyright (c) Microsoft Corporation. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -18,4 +18,4 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. +SOFTWARE diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/README.md new file mode 100644 index 00000000..35a74e18 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/README.md @@ -0,0 +1,39 @@ +# Azure Core Client Module for Go + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/azcore)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore) +[![Build Status](https://dev.azure.com/azure-sdk/public/_apis/build/status/go/go%20-%20azcore%20-%20ci?branchName=main)](https://dev.azure.com/azure-sdk/public/_build/latest?definitionId=1843&branchName=main) +[![Code Coverage](https://img.shields.io/azure-devops/coverage/azure-sdk/public/1843/main)](https://img.shields.io/azure-devops/coverage/azure-sdk/public/1843/main) + +The `azcore` module provides a set of common interfaces and types for Go SDK client modules. +These modules follow the [Azure SDK Design Guidelines for Go](https://azure.github.io/azure-sdk/golang_introduction.html). + +## Getting started + +This project uses [Go modules](https://github.com/golang/go/wiki/Modules) for versioning and dependency management. + +Typically, you will not need to explicitly install `azcore` as it will be installed as a client module dependency. +To add the latest version to your `go.mod` file, execute the following command. + +```bash +go get github.com/Azure/azure-sdk-for-go/sdk/azcore +``` + +General documentation and examples can be found on [pkg.go.dev](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore). + +## Contributing +This project welcomes contributions and suggestions. Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. +For details, visit [https://cla.microsoft.com](https://cla.microsoft.com). + +When you submit a pull request, a CLA-bot will automatically determine whether +you need to provide a CLA and decorate the PR appropriately (e.g., label, +comment). Simply follow the instructions provided by the bot. 
You will only +need to do this once across all repos using our CLA. + +This project has adopted the +[Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information, see the +[Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) +or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any +additional questions or comments. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml new file mode 100644 index 00000000..aab92185 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml @@ -0,0 +1,29 @@ +# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file. +trigger: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/azcore/ + - eng/ + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/azcore/ + - eng/ + +stages: +- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + ServiceDirectory: azcore diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go new file mode 100644 index 00000000..9d077a3e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go @@ -0,0 +1,44 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package cloud + +var ( + // AzureChina contains configuration for Azure China. + AzureChina = Configuration{ + ActiveDirectoryAuthorityHost: "https://login.chinacloudapi.cn/", Services: map[ServiceName]ServiceConfiguration{}, + } + // AzureGovernment contains configuration for Azure Government. + AzureGovernment = Configuration{ + ActiveDirectoryAuthorityHost: "https://login.microsoftonline.us/", Services: map[ServiceName]ServiceConfiguration{}, + } + // AzurePublic contains configuration for Azure Public Cloud. + AzurePublic = Configuration{ + ActiveDirectoryAuthorityHost: "https://login.microsoftonline.com/", Services: map[ServiceName]ServiceConfiguration{}, + } +) + +// ServiceName identifies a cloud service. +type ServiceName string + +// ResourceManager is a global constant identifying Azure Resource Manager. +const ResourceManager ServiceName = "resourceManager" + +// ServiceConfiguration configures a specific cloud service such as Azure Resource Manager. +type ServiceConfiguration struct { + // Audience is the audience the client will request for its access tokens. + Audience string + // Endpoint is the service's base URL. + Endpoint string +} + +// Configuration configures a cloud. +type Configuration struct { + // ActiveDirectoryAuthorityHost is the base URL of the cloud's Azure Active Directory. + ActiveDirectoryAuthorityHost string + // Services contains configuration for the cloud's services. + Services map[ServiceName]ServiceConfiguration +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go new file mode 100644 index 00000000..985b1bde --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go @@ -0,0 +1,53 @@ +//go:build go1.16 +// +build go1.16 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +/* +Package cloud implements a configuration API for applications deployed to sovereign or private Azure clouds. + +Azure SDK client configuration defaults are appropriate for Azure Public Cloud (sometimes referred to as +"Azure Commercial" or simply "Microsoft Azure"). This package enables applications deployed to other +Azure Clouds to configure clients appropriately. + +This package contains predefined configuration for well-known sovereign clouds such as Azure Government and +Azure China. Azure SDK clients accept this configuration via the Cloud field of azcore.ClientOptions. For +example, configuring a credential and ARM client for Azure Government: + + opts := azcore.ClientOptions{Cloud: cloud.AzureGovernment} + cred, err := azidentity.NewDefaultAzureCredential( + &azidentity.DefaultAzureCredentialOptions{ClientOptions: opts}, + ) + handle(err) + + client, err := armsubscription.NewClient( + cred, &arm.ClientOptions{ClientOptions: opts}, + ) + handle(err) + +Applications deployed to a private cloud such as Azure Stack create a Configuration object with +appropriate values: + + c := cloud.Configuration{ + ActiveDirectoryAuthorityHost: "https://...", + Services: map[cloud.ServiceName]cloud.ServiceConfiguration{ + cloud.ResourceManager: { + Audience: "...", + Endpoint: "https://...", + }, + }, + } + opts := azcore.ClientOptions{Cloud: c} + + cred, err := azidentity.NewDefaultAzureCredential( + &azidentity.DefaultAzureCredentialOptions{ClientOptions: opts}, + ) + handle(err) + + client, err := armsubscription.NewClient( + cred, &arm.ClientOptions{ClientOptions: opts}, + ) + handle(err) +*/ +package cloud diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go new file mode 100644 index 00000000..27231ad9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go @@ -0,0 +1,114 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azcore + +import ( + "reflect" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" +) + +// AccessToken represents an Azure service bearer access token with expiry information. +type AccessToken = exported.AccessToken + +// TokenCredential represents a credential capable of providing an OAuth token. +type TokenCredential = exported.TokenCredential + +// holds sentinel values used to send nulls +var nullables map[reflect.Type]interface{} = map[reflect.Type]interface{}{} + +// NullValue is used to send an explicit 'null' within a request. +// This is typically used in JSON-MERGE-PATCH operations to delete a value. +func NullValue[T any]() T { + t := shared.TypeOfT[T]() + v, found := nullables[t] + if !found { + var o reflect.Value + if k := t.Kind(); k == reflect.Map { + o = reflect.MakeMap(t) + } else if k == reflect.Slice { + // empty slices appear to all point to the same data block + // which causes comparisons to become ambiguous. so we create + // a slice with len/cap of one which ensures a unique address. 
+ o = reflect.MakeSlice(t, 1, 1) + } else { + o = reflect.New(t.Elem()) + } + v = o.Interface() + nullables[t] = v + } + // return the sentinel object + return v.(T) +} + +// IsNullValue returns true if the field contains a null sentinel value. +// This is used by custom marshallers to properly encode a null value. +func IsNullValue[T any](v T) bool { + // see if our map has a sentinel object for this *T + t := reflect.TypeOf(v) + if o, found := nullables[t]; found { + o1 := reflect.ValueOf(o) + v1 := reflect.ValueOf(v) + // we found it; return true if v points to the sentinel object. + // NOTE: maps and slices can only be compared to nil, else you get + // a runtime panic. so we compare addresses instead. + return o1.Pointer() == v1.Pointer() + } + // no sentinel object for this *t + return false +} + +// ClientOptions contains configuration settings for a client's pipeline. +type ClientOptions = policy.ClientOptions + +// Client is a basic HTTP client. It consists of a pipeline and tracing provider. +type Client struct { + pl runtime.Pipeline + tr tracing.Tracer +} + +// NewClient creates a new Client instance with the provided values. +// - clientName - the fully qualified name of the client ("module/package.Client"); this is used by the telemetry policy and tracing provider. +// if module and package are the same value, the "module/" prefix can be omitted. +// - moduleVersion - the semantic version of the containing module; used by the telemetry policy +// - plOpts - pipeline configuration options; can be the zero-value +// - options - optional client configurations; pass nil to accept the default values +func NewClient(clientName, moduleVersion string, plOpts runtime.PipelineOptions, options *ClientOptions) (*Client, error) { + mod, client, err := shared.ExtractModuleName(clientName) + if err != nil { + return nil, err + } + + if options == nil { + options = &ClientOptions{} + } + + if !options.Telemetry.Disabled { + if err := shared.ValidateModVer(moduleVersion); err != nil { + return nil, err + } + } + + pl := runtime.NewPipeline(mod, moduleVersion, plOpts, options) + + tr := options.TracingProvider.NewTracer(client, moduleVersion) + return &Client{pl: pl, tr: tr}, nil +} + +// Pipeline returns the pipeline for this client. +func (c *Client) Pipeline() runtime.Pipeline { + return c.pl +} + +// Tracer returns the tracer for this client. +func (c *Client) Tracer() tracing.Tracer { + return c.tr +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go new file mode 100644 index 00000000..28c64678 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go @@ -0,0 +1,257 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +/* +Package azcore implements an HTTP request/response middleware pipeline used by Azure SDK clients. + +The middleware consists of three components. + + - One or more Policy instances. + - A Transporter instance. + - A Pipeline instance that combines the Policy and Transporter instances. + +# Implementing the Policy Interface + +A Policy can be implemented in two ways; as a first-class function for a stateless Policy, or as +a method on a type for a stateful Policy. 
Note that HTTP requests made via the same pipeline share +the same Policy instances, so if a Policy mutates its state it MUST be properly synchronized to +avoid race conditions. + +A Policy's Do method is called when an HTTP request wants to be sent over the network. The Do method can +perform any operation(s) it desires. For example, it can log the outgoing request, mutate the URL, headers, +and/or query parameters, inject a failure, etc. Once the Policy has successfully completed its request +work, it must call the Next() method on the *policy.Request instance in order to pass the request to the +next Policy in the chain. + +When an HTTP response comes back, the Policy then gets a chance to process the response/error. The Policy instance +can log the response, retry the operation if it failed due to a transient error or timeout, unmarshal the response +body, etc. Once the Policy has successfully completed its response work, it must return the *http.Response +and error instances to its caller. + +Template for implementing a stateless Policy: + + type policyFunc func(*policy.Request) (*http.Response, error) + + // Do implements the Policy interface on policyFunc. + func (pf policyFunc) Do(req *policy.Request) (*http.Response, error) { + return pf(req) + } + + func NewMyStatelessPolicy() policy.Policy { + return policyFunc(func(req *policy.Request) (*http.Response, error) { + // TODO: mutate/process Request here + + // forward Request to next Policy & get Response/error + resp, err := req.Next() + + // TODO: mutate/process Response/error here + + // return Response/error to previous Policy + return resp, err + }) + } + +Template for implementing a stateful Policy: + + type MyStatefulPolicy struct { + // TODO: add configuration/setting fields here + } + + // TODO: add initialization args to NewMyStatefulPolicy() + func NewMyStatefulPolicy() policy.Policy { + return &MyStatefulPolicy{ + // TODO: initialize configuration/setting fields here + } + } + + func (p *MyStatefulPolicy) Do(req *policy.Request) (resp *http.Response, err error) { + // TODO: mutate/process Request here + + // forward Request to next Policy & get Response/error + resp, err := req.Next() + + // TODO: mutate/process Response/error here + + // return Response/error to previous Policy + return resp, err + } + +# Implementing the Transporter Interface + +The Transporter interface is responsible for sending the HTTP request and returning the corresponding +HTTP response or error. The Transporter is invoked by the last Policy in the chain. The default Transporter +implementation uses a shared http.Client from the standard library. + +The same stateful/stateless rules for Policy implementations apply to Transporter implementations. + +# Using Policy and Transporter Instances Via a Pipeline + +To use the Policy and Transporter instances, an application passes them to the runtime.NewPipeline function. + + func NewPipeline(transport Transporter, policies ...Policy) Pipeline + +The specified Policy instances form a chain and are invoked in the order provided to NewPipeline +followed by the Transporter. + +Once the Pipeline has been created, create a runtime.Request instance and pass it to Pipeline's Do method. + + func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) + + func (p Pipeline) Do(req *Request) (*http.Request, error) + +The Pipeline.Do method sends the specified Request through the chain of Policy and Transporter +instances. 
The response/error is then sent through the same chain of Policy instances in reverse +order. For example, assuming there are Policy types PolicyA, PolicyB, and PolicyC along with +TransportA. + + pipeline := NewPipeline(TransportA, PolicyA, PolicyB, PolicyC) + +The flow of Request and Response looks like the following: + + policy.Request -> PolicyA -> PolicyB -> PolicyC -> TransportA -----+ + | + HTTP(S) endpoint + | + caller <--------- PolicyA <- PolicyB <- PolicyC <- http.Response-+ + +# Creating a Request Instance + +The Request instance passed to Pipeline's Do method is a wrapper around an *http.Request. It also +contains some internal state and provides various convenience methods. You create a Request instance +by calling the runtime.NewRequest function: + + func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) + +If the Request should contain a body, call the SetBody method. + + func (req *Request) SetBody(body ReadSeekCloser, contentType string) error + +A seekable stream is required so that upon retry, the retry Policy instance can seek the stream +back to the beginning before retrying the network request and re-uploading the body. + +# Sending an Explicit Null + +Operations like JSON-MERGE-PATCH send a JSON null to indicate a value should be deleted. + + { + "delete-me": null + } + +This requirement conflicts with the SDK's default marshalling that specifies "omitempty" as +a means to resolve the ambiguity between a field to be excluded and its zero-value. + + type Widget struct { + Name *string `json:",omitempty"` + Count *int `json:",omitempty"` + } + +In the above example, Name and Count are defined as pointer-to-type to disambiguate between +a missing value (nil) and a zero-value (0) which might have semantic differences. + +In a PATCH operation, any fields left as nil are to have their values preserved. When updating +a Widget's count, one simply specifies the new value for Count, leaving Name nil. + +To fulfill the requirement for sending a JSON null, the NullValue() function can be used. + + w := Widget{ + Count: azcore.NullValue[*int](), + } + +This sends an explict "null" for Count, indicating that any current value for Count should be deleted. + +# Processing the Response + +When the HTTP response is received, the *http.Response is returned directly. Each Policy instance +can inspect/mutate the *http.Response. + +# Built-in Logging + +To enable logging, set environment variable AZURE_SDK_GO_LOGGING to "all" before executing your program. + +By default the logger writes to stderr. This can be customized by calling log.SetListener, providing +a callback that writes to the desired location. Any custom logging implementation MUST provide its +own synchronization to handle concurrent invocations. + +See the docs for the log package for further details. + +# Pageable Operations + +Pageable operations return potentially large data sets spread over multiple GET requests. The result of +each GET is a "page" of data consisting of a slice of items. + +Pageable operations can be identified by their New*Pager naming convention and return type of *runtime.Pager[T]. + + func (c *WidgetClient) NewListWidgetsPager(o *Options) *runtime.Pager[PageResponse] + +The call to WidgetClient.NewListWidgetsPager() returns an instance of *runtime.Pager[T] for fetching pages +and determining if there are more pages to fetch. No IO calls are made until the NextPage() method is invoked. 
+ + pager := widgetClient.NewListWidgetsPager(nil) + for pager.More() { + page, err := pager.NextPage(context.TODO()) + // handle err + for _, widget := range page.Values { + // process widget + } + } + +# Long-Running Operations + +Long-running operations (LROs) are operations consisting of an initial request to start the operation followed +by polling to determine when the operation has reached a terminal state. An LRO's terminal state is one +of the following values. + + - Succeeded - the LRO completed successfully + - Failed - the LRO failed to complete + - Canceled - the LRO was canceled + +LROs can be identified by their Begin* prefix and their return type of *runtime.Poller[T]. + + func (c *WidgetClient) BeginCreateOrUpdate(ctx context.Context, w Widget, o *Options) (*runtime.Poller[Response], error) + +When a call to WidgetClient.BeginCreateOrUpdate() returns a nil error, it means that the LRO has started. +It does _not_ mean that the widget has been created or updated (or failed to be created/updated). + +The *runtime.Poller[T] provides APIs for determining the state of the LRO. To wait for the LRO to complete, +call the PollUntilDone() method. + + poller, err := widgetClient.BeginCreateOrUpdate(context.TODO(), Widget{}, nil) + // handle err + result, err := poller.PollUntilDone(context.TODO(), nil) + // handle err + // use result + +The call to PollUntilDone() will block the current goroutine until the LRO has reached a terminal state or the +context is canceled/timed out. + +Note that LROs can take anywhere from several seconds to several minutes. The duration is operation-dependent. Due to +this variant behavior, pollers do _not_ have a preconfigured time-out. Use a context with the appropriate cancellation +mechanism as required. + +# Resume Tokens + +Pollers provide the ability to serialize their state into a "resume token" which can be used by another process to +recreate the poller. This is achieved via the runtime.Poller[T].ResumeToken() method. + + token, err := poller.ResumeToken() + // handle error + +Note that a token can only be obtained for a poller that's in a non-terminal state. Also note that any subsequent calls +to poller.Poll() might change the poller's state. In this case, a new token should be created. + +After the token has been obtained, it can be used to recreate an instance of the originating poller. + + poller, err := widgetClient.BeginCreateOrUpdate(nil, Widget{}, &Options{ + ResumeToken: token, + }) + +When resuming a poller, no IO is performed, and zero-value arguments can be used for everything but the Options.ResumeToken. + +Resume tokens are unique per service client and operation. Attempting to resume a poller for LRO BeginB() with a token from LRO +BeginA() will result in an error. +*/ +package azcore diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go new file mode 100644 index 00000000..17bd50c6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go @@ -0,0 +1,14 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azcore + +import "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + +// ResponseError is returned when a request is made to a service and +// the service returns a non-success HTTP status code. +// Use errors.As() to access this type in the error chain. 
+type ResponseError = exported.ResponseError diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go new file mode 100644 index 00000000..23ea7e7c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go @@ -0,0 +1,48 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azcore + +import ( + "strings" +) + +// ETag is a property used for optimistic concurrency during updates +// ETag is a validator based on https://tools.ietf.org/html/rfc7232#section-2.3.2 +// An ETag can be empty (""). +type ETag string + +// ETagAny is an ETag that represents everything, the value is "*" +const ETagAny ETag = "*" + +// Equals does a strong comparison of two ETags. Equals returns true when both +// ETags are not weak and the values of the underlying strings are equal. +func (e ETag) Equals(other ETag) bool { + return !e.IsWeak() && !other.IsWeak() && e == other +} + +// WeakEquals does a weak comparison of two ETags. Two ETags are equivalent if their opaque-tags match +// character-by-character, regardless of either or both being tagged as "weak". +func (e ETag) WeakEquals(other ETag) bool { + getStart := func(e1 ETag) int { + if e1.IsWeak() { + return 2 + } + return 0 + } + aStart := getStart(e) + bStart := getStart(other) + + aVal := e[aStart:] + bVal := other[bStart:] + + return aVal == bVal +} + +// IsWeak specifies whether the ETag is strong or weak. +func (e ETag) IsWeak() bool { + return len(e) >= 4 && strings.HasPrefix(string(e), "W/\"") && strings.HasSuffix(string(e), "\"") +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go new file mode 100644 index 00000000..a1236b36 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go @@ -0,0 +1,67 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "context" + "io" + "net/http" + "time" +) + +type nopCloser struct { + io.ReadSeeker +} + +func (n nopCloser) Close() error { + return nil +} + +// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker. +// Exported as streaming.NopCloser(). +func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser { + return nopCloser{rs} +} + +// HasStatusCode returns true if the Response's status code is one of the specified values. +// Exported as runtime.HasStatusCode(). +func HasStatusCode(resp *http.Response, statusCodes ...int) bool { + if resp == nil { + return false + } + for _, sc := range statusCodes { + if resp.StatusCode == sc { + return true + } + } + return false +} + +// AccessToken represents an Azure service bearer access token with expiry information. +// Exported as azcore.AccessToken. +type AccessToken struct { + Token string + ExpiresOn time.Time +} + +// TokenRequestOptions contain specific parameter that may be used by credentials types when attempting to get a token. +// Exported as policy.TokenRequestOptions. +type TokenRequestOptions struct { + // Scopes contains the list of permission scopes required for the token. + Scopes []string + + // TenantID identifies the tenant from which to request the token. 
azidentity credentials authenticate in + // their configured default tenants when this field isn't set. + TenantID string +} + +// TokenCredential represents a credential capable of providing an OAuth token. +// Exported as azcore.TokenCredential. +type TokenCredential interface { + // GetToken requests an access token for the specified set of scopes. + GetToken(ctx context.Context, options TokenRequestOptions) (AccessToken, error) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go new file mode 100644 index 00000000..c44efd6e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go @@ -0,0 +1,97 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "errors" + "fmt" + "net/http" + + "golang.org/x/net/http/httpguts" +) + +// Policy represents an extensibility point for the Pipeline that can mutate the specified +// Request and react to the received Response. +// Exported as policy.Policy. +type Policy interface { + // Do applies the policy to the specified Request. When implementing a Policy, mutate the + // request before calling req.Next() to move on to the next policy, and respond to the result + // before returning to the caller. + Do(req *Request) (*http.Response, error) +} + +// Pipeline represents a primitive for sending HTTP requests and receiving responses. +// Its behavior can be extended by specifying policies during construction. +// Exported as runtime.Pipeline. +type Pipeline struct { + policies []Policy +} + +// Transporter represents an HTTP pipeline transport used to send HTTP requests and receive responses. +// Exported as policy.Transporter. +type Transporter interface { + // Do sends the HTTP request and returns the HTTP response or error. + Do(req *http.Request) (*http.Response, error) +} + +// used to adapt a TransportPolicy to a Policy +type transportPolicy struct { + trans Transporter +} + +func (tp transportPolicy) Do(req *Request) (*http.Response, error) { + if tp.trans == nil { + return nil, errors.New("missing transporter") + } + resp, err := tp.trans.Do(req.Raw()) + if err != nil { + return nil, err + } else if resp == nil { + // there was no response and no error (rare but can happen) + // this ensures the retry policy will retry the request + return nil, errors.New("received nil response") + } + return resp, nil +} + +// NewPipeline creates a new Pipeline object from the specified Policies. +// Not directly exported, but used as part of runtime.NewPipeline(). +func NewPipeline(transport Transporter, policies ...Policy) Pipeline { + // transport policy must always be the last in the slice + policies = append(policies, transportPolicy{trans: transport}) + return Pipeline{ + policies: policies, + } +} + +// Do is called for each and every HTTP request. It passes the request through all +// the Policy objects (which can transform the Request's URL/query parameters/headers) +// and ultimately sends the transformed HTTP request over the network. 
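+//
+// A minimal illustrative call sequence; ctx is assumed to be a context.Context, pl a previously
+// constructed Pipeline, and the endpoint below is only a placeholder, not a real service:
+//
+//	req, err := NewRequest(ctx, http.MethodGet, "https://contoso.example/widgets")
+//	// handle err
+//	resp, err := pl.Do(req)
+//	// handle err and inspect resp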
+func (p Pipeline) Do(req *Request) (*http.Response, error) { + if req == nil { + return nil, errors.New("request cannot be nil") + } + // check copied from Transport.roundTrip() + for k, vv := range req.Raw().Header { + if !httpguts.ValidHeaderFieldName(k) { + if req.Raw().Body != nil { + req.Raw().Body.Close() + } + return nil, fmt.Errorf("invalid header field name %q", k) + } + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + if req.Raw().Body != nil { + req.Raw().Body.Close() + } + return nil, fmt.Errorf("invalid header field value %q for key %v", v, k) + } + } + } + req.policies = p.policies + return req.Next() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go new file mode 100644 index 00000000..fa99d1b7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go @@ -0,0 +1,182 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "reflect" + "strconv" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" +) + +// Request is an abstraction over the creation of an HTTP request as it passes through the pipeline. +// Don't use this type directly, use NewRequest() instead. +// Exported as policy.Request. +type Request struct { + req *http.Request + body io.ReadSeekCloser + policies []Policy + values opValues +} + +type opValues map[reflect.Type]interface{} + +// Set adds/changes a value +func (ov opValues) set(value interface{}) { + ov[reflect.TypeOf(value)] = value +} + +// Get looks for a value set by SetValue first +func (ov opValues) get(value interface{}) bool { + v, ok := ov[reflect.ValueOf(value).Elem().Type()] + if ok { + reflect.ValueOf(value).Elem().Set(reflect.ValueOf(v)) + } + return ok +} + +// NewRequest creates a new Request with the specified input. +// Exported as runtime.NewRequest(). +func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) { + req, err := http.NewRequestWithContext(ctx, httpMethod, endpoint, nil) + if err != nil { + return nil, err + } + if req.URL.Host == "" { + return nil, errors.New("no Host in request URL") + } + if !(req.URL.Scheme == "http" || req.URL.Scheme == "https") { + return nil, fmt.Errorf("unsupported protocol scheme %s", req.URL.Scheme) + } + return &Request{req: req}, nil +} + +// Body returns the original body specified when the Request was created. +func (req *Request) Body() io.ReadSeekCloser { + return req.body +} + +// Raw returns the underlying HTTP request. +func (req *Request) Raw() *http.Request { + return req.req +} + +// Next calls the next policy in the pipeline. +// If there are no more policies, nil and an error are returned. +// This method is intended to be called from pipeline policies. +// To send a request through a pipeline call Pipeline.Do(). +func (req *Request) Next() (*http.Response, error) { + if len(req.policies) == 0 { + return nil, errors.New("no more policies") + } + nextPolicy := req.policies[0] + nextReq := *req + nextReq.policies = nextReq.policies[1:] + return nextPolicy.Do(&nextReq) +} + +// SetOperationValue adds/changes a mutable key/value associated with a single operation. 
+func (req *Request) SetOperationValue(value interface{}) { + if req.values == nil { + req.values = opValues{} + } + req.values.set(value) +} + +// OperationValue looks for a value set by SetOperationValue(). +func (req *Request) OperationValue(value interface{}) bool { + if req.values == nil { + return false + } + return req.values.get(value) +} + +// SetBody sets the specified ReadSeekCloser as the HTTP request body, and sets Content-Type and Content-Length +// accordingly. If the ReadSeekCloser is nil or empty, Content-Length won't be set. If contentType is "", +// Content-Type won't be set. +// Use streaming.NopCloser to turn an io.ReadSeeker into an io.ReadSeekCloser. +func (req *Request) SetBody(body io.ReadSeekCloser, contentType string) error { + var err error + var size int64 + if body != nil { + size, err = body.Seek(0, io.SeekEnd) // Seek to the end to get the stream's size + if err != nil { + return err + } + } + if size == 0 { + // treat an empty stream the same as a nil one: assign req a nil body + body = nil + // RFC 9110 specifies a client shouldn't set Content-Length on a request containing no content + // (Del is a no-op when the header has no value) + req.req.Header.Del(shared.HeaderContentLength) + } else { + _, err = body.Seek(0, io.SeekStart) + if err != nil { + return err + } + req.req.Header.Set(shared.HeaderContentLength, strconv.FormatInt(size, 10)) + req.Raw().GetBody = func() (io.ReadCloser, error) { + _, err := body.Seek(0, io.SeekStart) // Seek back to the beginning of the stream + return body, err + } + } + // keep a copy of the body argument. this is to handle cases + // where req.Body is replaced, e.g. httputil.DumpRequest and friends. + req.body = body + req.req.Body = body + req.req.ContentLength = size + if contentType == "" { + // Del is a no-op when the header has no value + req.req.Header.Del(shared.HeaderContentType) + } else { + req.req.Header.Set(shared.HeaderContentType, contentType) + } + return nil +} + +// RewindBody seeks the request's Body stream back to the beginning so it can be resent when retrying an operation. +func (req *Request) RewindBody() error { + if req.body != nil { + // Reset the stream back to the beginning and restore the body + _, err := req.body.Seek(0, io.SeekStart) + req.req.Body = req.body + return err + } + return nil +} + +// Close closes the request body. +func (req *Request) Close() error { + if req.body == nil { + return nil + } + return req.body.Close() +} + +// Clone returns a deep copy of the request with its context changed to ctx. +func (req *Request) Clone(ctx context.Context) *Request { + r2 := *req + r2.req = req.req.Clone(ctx) + return &r2 +} + +// not exported but dependent on Request + +// PolicyFunc is a type that implements the Policy interface. +// Use this type when implementing a stateless policy as a first-class function. +type PolicyFunc func(*Request) (*http.Response, error) + +// Do implements the Policy interface on policyFunc. +func (pf PolicyFunc) Do(req *Request) (*http.Response, error) { + return pf(req) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go new file mode 100644 index 00000000..7df2f88c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go @@ -0,0 +1,144 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. + +package exported + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "regexp" + + "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" +) + +// NewResponseError creates a new *ResponseError from the provided HTTP response. +// Exported as runtime.NewResponseError(). +func NewResponseError(resp *http.Response) error { + respErr := &ResponseError{ + StatusCode: resp.StatusCode, + RawResponse: resp, + } + + // prefer the error code in the response header + if ec := resp.Header.Get("x-ms-error-code"); ec != "" { + respErr.ErrorCode = ec + return respErr + } + + // if we didn't get x-ms-error-code, check in the response body + body, err := exported.Payload(resp, nil) + if err != nil { + return err + } + + if len(body) > 0 { + if code := extractErrorCodeJSON(body); code != "" { + respErr.ErrorCode = code + } else if code := extractErrorCodeXML(body); code != "" { + respErr.ErrorCode = code + } + } + + return respErr +} + +func extractErrorCodeJSON(body []byte) string { + var rawObj map[string]interface{} + if err := json.Unmarshal(body, &rawObj); err != nil { + // not a JSON object + return "" + } + + // check if this is a wrapped error, i.e. { "error": { ... } } + // if so then unwrap it + if wrapped, ok := rawObj["error"]; ok { + unwrapped, ok := wrapped.(map[string]interface{}) + if !ok { + return "" + } + rawObj = unwrapped + } else if wrapped, ok := rawObj["odata.error"]; ok { + // check if this a wrapped odata error, i.e. { "odata.error": { ... } } + unwrapped, ok := wrapped.(map[string]any) + if !ok { + return "" + } + rawObj = unwrapped + } + + // now check for the error code + code, ok := rawObj["code"] + if !ok { + return "" + } + codeStr, ok := code.(string) + if !ok { + return "" + } + return codeStr +} + +func extractErrorCodeXML(body []byte) string { + // regular expression is much easier than dealing with the XML parser + rx := regexp.MustCompile(`<(?:\w+:)?[c|C]ode>\s*(\w+)\s*<\/(?:\w+:)?[c|C]ode>`) + res := rx.FindStringSubmatch(string(body)) + if len(res) != 2 { + return "" + } + // first submatch is the entire thing, second one is the captured error code + return res[1] +} + +// ResponseError is returned when a request is made to a service and +// the service returns a non-success HTTP status code. +// Use errors.As() to access this type in the error chain. +// Exported as azcore.ResponseError. +type ResponseError struct { + // ErrorCode is the error code returned by the resource provider if available. + ErrorCode string + + // StatusCode is the HTTP status code as defined in https://pkg.go.dev/net/http#pkg-constants. + StatusCode int + + // RawResponse is the underlying HTTP response. + RawResponse *http.Response +} + +// Error implements the error interface for type ResponseError. +// Note that the message contents are not contractual and can change over time. 
+func (e *ResponseError) Error() string { + // write the request method and URL with response status code + msg := &bytes.Buffer{} + fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path) + fmt.Fprintln(msg, "--------------------------------------------------------------------------------") + fmt.Fprintf(msg, "RESPONSE %d: %s\n", e.RawResponse.StatusCode, e.RawResponse.Status) + if e.ErrorCode != "" { + fmt.Fprintf(msg, "ERROR CODE: %s\n", e.ErrorCode) + } else { + fmt.Fprintln(msg, "ERROR CODE UNAVAILABLE") + } + fmt.Fprintln(msg, "--------------------------------------------------------------------------------") + body, err := exported.Payload(e.RawResponse, nil) + if err != nil { + // this really shouldn't fail at this point as the response + // body is already cached (it was read in NewResponseError) + fmt.Fprintf(msg, "Error reading response body: %v", err) + } else if len(body) > 0 { + if err := json.Indent(msg, body, "", " "); err != nil { + // failed to pretty-print so just dump it verbatim + fmt.Fprint(msg, string(body)) + } + // the standard library doesn't have a pretty-printer for XML + fmt.Fprintln(msg) + } else { + fmt.Fprintln(msg, "Response contained no body") + } + fmt.Fprintln(msg, "--------------------------------------------------------------------------------") + + return msg.String() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go new file mode 100644 index 00000000..0684cb31 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go @@ -0,0 +1,38 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// This is an internal helper package to combine the complete logging APIs. +package log + +import ( + azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +type Event = log.Event + +const ( + EventRequest = azlog.EventRequest + EventResponse = azlog.EventResponse + EventRetryPolicy = azlog.EventRetryPolicy + EventLRO = azlog.EventLRO +) + +func Write(cls log.Event, msg string) { + log.Write(cls, msg) +} + +func Writef(cls log.Event, format string, a ...interface{}) { + log.Writef(cls, format, a...) +} + +func SetListener(lst func(Event, string)) { + log.SetListener(lst) +} + +func Should(cls log.Event) bool { + return log.Should(cls) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go new file mode 100644 index 00000000..b05bd8b3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go @@ -0,0 +1,159 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package async + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" +) + +// see https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/async-api-reference.md + +// Applicable returns true if the LRO is using Azure-AsyncOperation. +func Applicable(resp *http.Response) bool { + return resp.Header.Get(shared.HeaderAzureAsync) != "" +} + +// CanResume returns true if the token can rehydrate this poller type. +func CanResume(token map[string]interface{}) bool { + _, ok := token["asyncURL"] + return ok +} + +// Poller is an LRO poller that uses the Azure-AsyncOperation pattern. +type Poller[T any] struct { + pl exported.Pipeline + + resp *http.Response + + // The URL from Azure-AsyncOperation header. + AsyncURL string `json:"asyncURL"` + + // The URL from Location header. + LocURL string `json:"locURL"` + + // The URL from the initial LRO request. + OrigURL string `json:"origURL"` + + // The HTTP method from the initial LRO request. + Method string `json:"method"` + + // The value of final-state-via from swagger, can be the empty string. + FinalState pollers.FinalStateVia `json:"finalState"` + + // The LRO's current state. + CurState string `json:"state"` +} + +// New creates a new Poller from the provided initial response and final-state type. +// Pass nil for response to create an empty Poller for rehydration. +func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia) (*Poller[T], error) { + if resp == nil { + log.Write(log.EventLRO, "Resuming Azure-AsyncOperation poller.") + return &Poller[T]{pl: pl}, nil + } + log.Write(log.EventLRO, "Using Azure-AsyncOperation poller.") + asyncURL := resp.Header.Get(shared.HeaderAzureAsync) + if asyncURL == "" { + return nil, errors.New("response is missing Azure-AsyncOperation header") + } + if !poller.IsValidURL(asyncURL) { + return nil, fmt.Errorf("invalid polling URL %s", asyncURL) + } + // check for provisioning state. if the operation is a RELO + // and terminates synchronously this will prevent extra polling. + // it's ok if there's no provisioning state. + state, _ := poller.GetProvisioningState(resp) + if state == "" { + state = poller.StatusInProgress + } + p := &Poller[T]{ + pl: pl, + resp: resp, + AsyncURL: asyncURL, + LocURL: resp.Header.Get(shared.HeaderLocation), + OrigURL: resp.Request.URL.String(), + Method: resp.Request.Method, + FinalState: finalState, + CurState: state, + } + return p, nil +} + +// Done returns true if the LRO is in a terminal state. +func (p *Poller[T]) Done() bool { + return poller.IsTerminalState(p.CurState) +} + +// Poll retrieves the current state of the LRO. 
+func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + err := pollers.PollHelper(ctx, p.AsyncURL, p.pl, func(resp *http.Response) (string, error) { + if !poller.StatusCodeValid(resp) { + p.resp = resp + return "", exported.NewResponseError(resp) + } + state, err := poller.GetStatus(resp) + if err != nil { + return "", err + } else if state == "" { + return "", errors.New("the response did not contain a status") + } + p.resp = resp + p.CurState = state + return p.CurState, nil + }) + if err != nil { + return nil, err + } + return p.resp, nil +} + +func (p *Poller[T]) Result(ctx context.Context, out *T) error { + if p.resp.StatusCode == http.StatusNoContent { + return nil + } else if poller.Failed(p.CurState) { + return exported.NewResponseError(p.resp) + } + var req *exported.Request + var err error + if p.Method == http.MethodPatch || p.Method == http.MethodPut { + // for PATCH and PUT, the final GET is on the original resource URL + req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL) + } else if p.Method == http.MethodPost { + if p.FinalState == pollers.FinalStateViaAzureAsyncOp { + // no final GET required + } else if p.FinalState == pollers.FinalStateViaOriginalURI { + req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL) + } else if p.LocURL != "" { + // ideally FinalState would be set to "location" but it isn't always. + // must check last due to more permissive condition. + req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL) + } + } + if err != nil { + return err + } + + // if a final GET request has been created, execute it + if req != nil { + resp, err := p.pl.Do(req) + if err != nil { + return err + } + p.resp = resp + } + + return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go new file mode 100644 index 00000000..2bb9e105 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go @@ -0,0 +1,135 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package body + +import ( + "context" + "errors" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" +) + +// Kind is the identifier of this type in a resume token. +const kind = "body" + +// Applicable returns true if the LRO is using no headers, just provisioning state. +// This is only applicable to PATCH and PUT methods and assumes no polling headers. +func Applicable(resp *http.Response) bool { + // we can't check for absense of headers due to some misbehaving services + // like redis that return a Location header but don't actually use that protocol + return resp.Request.Method == http.MethodPatch || resp.Request.Method == http.MethodPut +} + +// CanResume returns true if the token can rehydrate this poller type. +func CanResume(token map[string]interface{}) bool { + t, ok := token["type"] + if !ok { + return false + } + tt, ok := t.(string) + if !ok { + return false + } + return tt == kind +} + +// Poller is an LRO poller that uses the Body pattern. 
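+// In this pattern there is no dedicated polling header; the poller re-issues GETs against the
+// original request URL and derives the LRO's state from the provisioningState property in the
+// response body.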
+type Poller[T any] struct { + pl exported.Pipeline + + resp *http.Response + + // The poller's type, used for resume token processing. + Type string `json:"type"` + + // The URL for polling. + PollURL string `json:"pollURL"` + + // The LRO's current state. + CurState string `json:"state"` +} + +// New creates a new Poller from the provided initial response. +// Pass nil for response to create an empty Poller for rehydration. +func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) { + if resp == nil { + log.Write(log.EventLRO, "Resuming Body poller.") + return &Poller[T]{pl: pl}, nil + } + log.Write(log.EventLRO, "Using Body poller.") + p := &Poller[T]{ + pl: pl, + resp: resp, + Type: kind, + PollURL: resp.Request.URL.String(), + } + // default initial state to InProgress. depending on the HTTP + // status code and provisioning state, we might change the value. + curState := poller.StatusInProgress + provState, err := poller.GetProvisioningState(resp) + if err != nil && !errors.Is(err, poller.ErrNoBody) { + return nil, err + } + if resp.StatusCode == http.StatusCreated && provState != "" { + // absense of provisioning state is ok for a 201, means the operation is in progress + curState = provState + } else if resp.StatusCode == http.StatusOK { + if provState != "" { + curState = provState + } else if provState == "" { + // for a 200, absense of provisioning state indicates success + curState = poller.StatusSucceeded + } + } else if resp.StatusCode == http.StatusNoContent { + curState = poller.StatusSucceeded + } + p.CurState = curState + return p, nil +} + +func (p *Poller[T]) Done() bool { + return poller.IsTerminalState(p.CurState) +} + +func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + err := pollers.PollHelper(ctx, p.PollURL, p.pl, func(resp *http.Response) (string, error) { + if !poller.StatusCodeValid(resp) { + p.resp = resp + return "", exported.NewResponseError(resp) + } + if resp.StatusCode == http.StatusNoContent { + p.resp = resp + p.CurState = poller.StatusSucceeded + return p.CurState, nil + } + state, err := poller.GetProvisioningState(resp) + if errors.Is(err, poller.ErrNoBody) { + // a missing response body in non-204 case is an error + return "", err + } else if state == "" { + // a response body without provisioning state is considered terminal success + state = poller.StatusSucceeded + } else if err != nil { + return "", err + } + p.resp = resp + p.CurState = state + return p.CurState, nil + }) + if err != nil { + return nil, err + } + return p.resp, nil +} + +func (p *Poller[T]) Result(ctx context.Context, out *T) error { + return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go new file mode 100644 index 00000000..d6be8987 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go @@ -0,0 +1,119 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package loc + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" +) + +// Kind is the identifier of this type in a resume token. +const kind = "loc" + +// Applicable returns true if the LRO is using Location. +func Applicable(resp *http.Response) bool { + return resp.Header.Get(shared.HeaderLocation) != "" +} + +// CanResume returns true if the token can rehydrate this poller type. +func CanResume(token map[string]interface{}) bool { + t, ok := token["type"] + if !ok { + return false + } + tt, ok := t.(string) + if !ok { + return false + } + return tt == kind +} + +// Poller is an LRO poller that uses the Location pattern. +type Poller[T any] struct { + pl exported.Pipeline + resp *http.Response + + Type string `json:"type"` + PollURL string `json:"pollURL"` + CurState string `json:"state"` +} + +// New creates a new Poller from the provided initial response. +// Pass nil for response to create an empty Poller for rehydration. +func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) { + if resp == nil { + log.Write(log.EventLRO, "Resuming Location poller.") + return &Poller[T]{pl: pl}, nil + } + log.Write(log.EventLRO, "Using Location poller.") + locURL := resp.Header.Get(shared.HeaderLocation) + if locURL == "" { + return nil, errors.New("response is missing Location header") + } + if !poller.IsValidURL(locURL) { + return nil, fmt.Errorf("invalid polling URL %s", locURL) + } + // check for provisioning state. if the operation is a RELO + // and terminates synchronously this will prevent extra polling. + // it's ok if there's no provisioning state. + state, _ := poller.GetProvisioningState(resp) + if state == "" { + state = poller.StatusInProgress + } + return &Poller[T]{ + pl: pl, + resp: resp, + Type: kind, + PollURL: locURL, + CurState: state, + }, nil +} + +func (p *Poller[T]) Done() bool { + return poller.IsTerminalState(p.CurState) +} + +func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + err := pollers.PollHelper(ctx, p.PollURL, p.pl, func(resp *http.Response) (string, error) { + // location polling can return an updated polling URL + if h := resp.Header.Get(shared.HeaderLocation); h != "" { + p.PollURL = h + } + // if provisioning state is available, use that. this is only + // for some ARM LRO scenarios (e.g. DELETE with a Location header) + // so if it's missing then use HTTP status code. 
+ provState, _ := poller.GetProvisioningState(resp) + p.resp = resp + if provState != "" { + p.CurState = provState + } else if resp.StatusCode == http.StatusAccepted { + p.CurState = poller.StatusInProgress + } else if resp.StatusCode > 199 && resp.StatusCode < 300 { + // any 2xx other than a 202 indicates success + p.CurState = poller.StatusSucceeded + } else { + p.CurState = poller.StatusFailed + } + return p.CurState, nil + }) + if err != nil { + return nil, err + } + return p.resp, nil +} + +func (p *Poller[T]) Result(ctx context.Context, out *T) error { + return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go new file mode 100644 index 00000000..1bc7ad0a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go @@ -0,0 +1,145 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package op + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" +) + +// Applicable returns true if the LRO is using Operation-Location. +func Applicable(resp *http.Response) bool { + return resp.Header.Get(shared.HeaderOperationLocation) != "" +} + +// CanResume returns true if the token can rehydrate this poller type. +func CanResume(token map[string]interface{}) bool { + _, ok := token["oplocURL"] + return ok +} + +// Poller is an LRO poller that uses the Operation-Location pattern. +type Poller[T any] struct { + pl exported.Pipeline + resp *http.Response + + OpLocURL string `json:"oplocURL"` + LocURL string `json:"locURL"` + OrigURL string `json:"origURL"` + Method string `json:"method"` + FinalState pollers.FinalStateVia `json:"finalState"` + CurState string `json:"state"` +} + +// New creates a new Poller from the provided initial response. +// Pass nil for response to create an empty Poller for rehydration. +func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia) (*Poller[T], error) { + if resp == nil { + log.Write(log.EventLRO, "Resuming Operation-Location poller.") + return &Poller[T]{pl: pl}, nil + } + log.Write(log.EventLRO, "Using Operation-Location poller.") + opURL := resp.Header.Get(shared.HeaderOperationLocation) + if opURL == "" { + return nil, errors.New("response is missing Operation-Location header") + } + if !poller.IsValidURL(opURL) { + return nil, fmt.Errorf("invalid Operation-Location URL %s", opURL) + } + locURL := resp.Header.Get(shared.HeaderLocation) + // Location header is optional + if locURL != "" && !poller.IsValidURL(locURL) { + return nil, fmt.Errorf("invalid Location URL %s", locURL) + } + // default initial state to InProgress. if the + // service sent us a status then use that instead. 
+ curState := poller.StatusInProgress + status, err := poller.GetStatus(resp) + if err != nil && !errors.Is(err, poller.ErrNoBody) { + return nil, err + } + if status != "" { + curState = status + } + + return &Poller[T]{ + pl: pl, + resp: resp, + OpLocURL: opURL, + LocURL: locURL, + OrigURL: resp.Request.URL.String(), + Method: resp.Request.Method, + FinalState: finalState, + CurState: curState, + }, nil +} + +func (p *Poller[T]) Done() bool { + return poller.IsTerminalState(p.CurState) +} + +func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + err := pollers.PollHelper(ctx, p.OpLocURL, p.pl, func(resp *http.Response) (string, error) { + if !poller.StatusCodeValid(resp) { + p.resp = resp + return "", exported.NewResponseError(resp) + } + state, err := poller.GetStatus(resp) + if err != nil { + return "", err + } else if state == "" { + return "", errors.New("the response did not contain a status") + } + p.resp = resp + p.CurState = state + return p.CurState, nil + }) + if err != nil { + return nil, err + } + return p.resp, nil +} + +func (p *Poller[T]) Result(ctx context.Context, out *T) error { + var req *exported.Request + var err error + if p.FinalState == pollers.FinalStateViaLocation && p.LocURL != "" { + req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL) + } else if p.FinalState == pollers.FinalStateViaOpLocation && p.Method == http.MethodPost { + // no final GET required, terminal response should have it + } else if rl, rlErr := poller.GetResourceLocation(p.resp); rlErr != nil && !errors.Is(rlErr, poller.ErrNoBody) { + return rlErr + } else if rl != "" { + req, err = exported.NewRequest(ctx, http.MethodGet, rl) + } else if p.Method == http.MethodPatch || p.Method == http.MethodPut { + req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL) + } else if p.Method == http.MethodPost && p.LocURL != "" { + req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL) + } + if err != nil { + return err + } + + // if a final GET request has been created, execute it + if req != nil { + resp, err := p.pl.Do(req) + if err != nil { + return err + } + p.resp = resp + } + + return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go new file mode 100644 index 00000000..37ed647f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go @@ -0,0 +1,24 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package pollers + +// FinalStateVia is the enumerated type for the possible final-state-via values. +type FinalStateVia string + +const ( + // FinalStateViaAzureAsyncOp indicates the final payload comes from the Azure-AsyncOperation URL. + FinalStateViaAzureAsyncOp FinalStateVia = "azure-async-operation" + + // FinalStateViaLocation indicates the final payload comes from the Location URL. + FinalStateViaLocation FinalStateVia = "location" + + // FinalStateViaOriginalURI indicates the final payload comes from the original URL. + FinalStateViaOriginalURI FinalStateVia = "original-uri" + + // FinalStateViaOpLocation indicates the final payload comes from the Operation-Location URL. 
+ FinalStateViaOpLocation FinalStateVia = "operation-location" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go new file mode 100644 index 00000000..d8d86a46 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go @@ -0,0 +1,187 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package pollers + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "reflect" + + azexported "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" +) + +// getTokenTypeName creates a type name from the type parameter T. +func getTokenTypeName[T any]() (string, error) { + tt := shared.TypeOfT[T]() + var n string + if tt.Kind() == reflect.Pointer { + n = "*" + tt = tt.Elem() + } + n += tt.Name() + if n == "" { + return "", errors.New("nameless types are not allowed") + } + return n, nil +} + +type resumeTokenWrapper[T any] struct { + Type string `json:"type"` + Token T `json:"token"` +} + +// NewResumeToken creates a resume token from the specified type. +// An error is returned if the generic type has no name (e.g. struct{}). +func NewResumeToken[TResult, TSource any](from TSource) (string, error) { + n, err := getTokenTypeName[TResult]() + if err != nil { + return "", err + } + b, err := json.Marshal(resumeTokenWrapper[TSource]{ + Type: n, + Token: from, + }) + if err != nil { + return "", err + } + return string(b), nil +} + +// ExtractToken returns the poller-specific token information from the provided token value. +func ExtractToken(token string) ([]byte, error) { + raw := map[string]json.RawMessage{} + if err := json.Unmarshal([]byte(token), &raw); err != nil { + return nil, err + } + // this is dependent on the type resumeTokenWrapper[T] + tk, ok := raw["token"] + if !ok { + return nil, errors.New("missing token value") + } + return tk, nil +} + +// IsTokenValid returns an error if the specified token isn't applicable for generic type T. +func IsTokenValid[T any](token string) error { + raw := map[string]interface{}{} + if err := json.Unmarshal([]byte(token), &raw); err != nil { + return err + } + t, ok := raw["type"] + if !ok { + return errors.New("missing type value") + } + tt, ok := t.(string) + if !ok { + return fmt.Errorf("invalid type format %T", t) + } + n, err := getTokenTypeName[T]() + if err != nil { + return err + } + if tt != n { + return fmt.Errorf("cannot resume from this poller token. token is for type %s, not %s", tt, n) + } + return nil +} + +// used if the operation synchronously completed +type NopPoller[T any] struct { + resp *http.Response + result T +} + +// NewNopPoller creates a NopPoller from the provided response. +// It unmarshals the response body into an instance of T. 
+func NewNopPoller[T any](resp *http.Response) (*NopPoller[T], error) { + np := &NopPoller[T]{resp: resp} + if resp.StatusCode == http.StatusNoContent { + return np, nil + } + payload, err := exported.Payload(resp, nil) + if err != nil { + return nil, err + } + if len(payload) == 0 { + return np, nil + } + if err = json.Unmarshal(payload, &np.result); err != nil { + return nil, err + } + return np, nil +} + +func (*NopPoller[T]) Done() bool { + return true +} + +func (p *NopPoller[T]) Poll(context.Context) (*http.Response, error) { + return p.resp, nil +} + +func (p *NopPoller[T]) Result(ctx context.Context, out *T) error { + *out = p.result + return nil +} + +// PollHelper creates and executes the request, calling update() with the response. +// If the request fails, the update func is not called. +// The update func returns the state of the operation for logging purposes or an error +// if it fails to extract the required state from the response. +func PollHelper(ctx context.Context, endpoint string, pl azexported.Pipeline, update func(resp *http.Response) (string, error)) error { + req, err := azexported.NewRequest(ctx, http.MethodGet, endpoint) + if err != nil { + return err + } + resp, err := pl.Do(req) + if err != nil { + return err + } + state, err := update(resp) + if err != nil { + return err + } + log.Writef(log.EventLRO, "State %s", state) + return nil +} + +// ResultHelper processes the response as success or failure. +// In the success case, it unmarshals the payload into either a new instance of T or out. +// In the failure case, it creates an *azcore.Response error from the response. +func ResultHelper[T any](resp *http.Response, failed bool, out *T) error { + // short-circuit the simple success case with no response body to unmarshal + if resp.StatusCode == http.StatusNoContent { + return nil + } + + defer resp.Body.Close() + if !poller.StatusCodeValid(resp) || failed { + // the LRO failed. unmarshall the error and update state + return azexported.NewResponseError(resp) + } + + // success case + payload, err := exported.Payload(resp, nil) + if err != nil { + return err + } + if len(payload) == 0 { + return nil + } + + if err = json.Unmarshal(payload, out); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go new file mode 100644 index 00000000..269a831e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go @@ -0,0 +1,36 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package shared + +const ( + ContentTypeAppJSON = "application/json" + ContentTypeAppXML = "application/xml" +) + +const ( + HeaderAuthorization = "Authorization" + HeaderAuxiliaryAuthorization = "x-ms-authorization-auxiliary" + HeaderAzureAsync = "Azure-AsyncOperation" + HeaderContentLength = "Content-Length" + HeaderContentType = "Content-Type" + HeaderLocation = "Location" + HeaderOperationLocation = "Operation-Location" + HeaderRetryAfter = "Retry-After" + HeaderUserAgent = "User-Agent" + HeaderWWWAuthenticate = "WWW-Authenticate" + HeaderXMSClientRequestID = "x-ms-client-request-id" +) + +const BearerTokenPrefix = "Bearer " + +const ( + // Module is the name of the calling module used in telemetry data. + Module = "azcore" + + // Version is the semantic version (see http://semver.org) of this module. 
+ Version = "v1.6.1" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go new file mode 100644 index 00000000..db0aaa7c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go @@ -0,0 +1,103 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package shared + +import ( + "context" + "fmt" + "net/http" + "reflect" + "regexp" + "strconv" + "time" +) + +// CtxWithHTTPHeaderKey is used as a context key for adding/retrieving http.Header. +type CtxWithHTTPHeaderKey struct{} + +// CtxWithRetryOptionsKey is used as a context key for adding/retrieving RetryOptions. +type CtxWithRetryOptionsKey struct{} + +// CtxIncludeResponseKey is used as a context key for retrieving the raw response. +type CtxIncludeResponseKey struct{} + +// Delay waits for the duration to elapse or the context to be cancelled. +func Delay(ctx context.Context, delay time.Duration) error { + select { + case <-time.After(delay): + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +// RetryAfter returns non-zero if the response contains a Retry-After header value. +func RetryAfter(resp *http.Response) time.Duration { + if resp == nil { + return 0 + } + ra := resp.Header.Get(HeaderRetryAfter) + if ra == "" { + return 0 + } + // retry-after values are expressed in either number of + // seconds or an HTTP-date indicating when to try again + if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 { + return time.Duration(retryAfter) * time.Second + } else if t, err := time.Parse(time.RFC1123, ra); err == nil { + return time.Until(t) + } + return 0 +} + +// TypeOfT returns the type of the generic type param. +func TypeOfT[T any]() reflect.Type { + // you can't, at present, obtain the type of + // a type parameter, so this is the trick + return reflect.TypeOf((*T)(nil)).Elem() +} + +// TransportFunc is a helper to use a first-class func to satisfy the Transporter interface. +type TransportFunc func(*http.Request) (*http.Response, error) + +// Do implements the Transporter interface for the TransportFunc type. +func (pf TransportFunc) Do(req *http.Request) (*http.Response, error) { + return pf(req) +} + +// ValidateModVer verifies that moduleVersion is a valid semver 2.0 string. +func ValidateModVer(moduleVersion string) error { + modVerRegx := regexp.MustCompile(`^v\d+\.\d+\.\d+(?:-[a-zA-Z0-9_.-]+)?$`) + if !modVerRegx.MatchString(moduleVersion) { + return fmt.Errorf("malformed moduleVersion param value %s", moduleVersion) + } + return nil +} + +// ExtractModuleName returns "module", "package.Client" from "module/package.Client" or +// "package", "package.Client" from "package.Client" when there's no "module/" prefix. +// If clientName is malformed, an error is returned. +func ExtractModuleName(clientName string) (string, string, error) { + // uses unnamed capturing for "module", "package.Client", and "package" + regex, err := regexp.Compile(`^(?:([a-z0-9]+)/)?(([a-z0-9]+)\.(?:[A-Za-z0-9]+))$`) + if err != nil { + return "", "", err + } + + matches := regex.FindStringSubmatch(clientName) + if len(matches) < 4 { + return "", "", fmt.Errorf("malformed clientName %s", clientName) + } + + // the first match is the entire string, the second is "module", the third is + // "package.Client" and the fourth is "package". 
// if there was no "module/" prefix, the second match will be the empty string
+	if matches[1] != "" {
+		return matches[1], matches[2], nil
+	}
+	return matches[3], matches[2], nil
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/doc.go
new file mode 100644
index 00000000..2f3901bf
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/doc.go
@@ -0,0 +1,10 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright 2017 Microsoft Corporation. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+// Package log contains functionality for configuring logging behavior.
+// Default logging to stderr can be enabled by setting environment variable AZURE_SDK_GO_LOGGING to "all".
+package log
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go
new file mode 100644
index 00000000..7bde29d0
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go
@@ -0,0 +1,50 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// Package log provides functionality for configuring logging facilities.
+package log
+
+import (
+	"github.com/Azure/azure-sdk-for-go/sdk/internal/log"
+)
+
+// Event is used to group entries. Each group can be toggled on or off.
+type Event = log.Event
+
+const (
+	// EventRequest entries contain information about HTTP requests.
+	// This includes information like the URL, query parameters, and headers.
+	EventRequest Event = "Request"
+
+	// EventResponse entries contain information about HTTP responses.
+	// This includes information like the HTTP status code, headers, and request URL.
+	EventResponse Event = "Response"
+
+	// EventRetryPolicy entries contain information specific to the retry policy in use.
+	EventRetryPolicy Event = "Retry"
+
+	// EventLRO entries contain information specific to long-running operations.
+	// This includes information like polling location, operation state, and sleep intervals.
+	EventLRO Event = "LongRunningOperation"
+)
+
+// SetEvents is used to control which events are written to
+// the log. By default all log events are written.
+// NOTE: this is not goroutine safe and should be called before using SDK clients.
+func SetEvents(cls ...Event) {
+	log.SetEvents(cls...)
+}
+
+// SetListener will set the Logger to write to the specified Listener.
+// NOTE: this is not goroutine safe and should be called before using SDK clients.
+func SetListener(lst func(Event, string)) {
+	log.SetListener(lst)
+}
+
+// for testing purposes
+func resetEvents() {
+	log.TestResetEvents()
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go
new file mode 100644
index 00000000..fad2579e
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go
@@ -0,0 +1,10 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright 2017 Microsoft Corporation. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+// Package policy contains the definitions needed for configuring in-box pipeline policies
+// and creating custom policies.
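+//
+// An illustrative ClientOptions sketch (the field values shown are arbitrary examples):
+//
+//	opts := policy.ClientOptions{
+//		Retry: policy.RetryOptions{
+//			MaxRetries: 5,
+//			RetryDelay: 2 * time.Second,
+//		},
+//		Logging: policy.LogOptions{
+//			AllowedHeaders: []string{"x-ms-request-id"},
+//		},
+//	}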
+package policy diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go new file mode 100644 index 00000000..b2000478 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go @@ -0,0 +1,164 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package policy + +import ( + "net/http" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" +) + +// Policy represents an extensibility point for the Pipeline that can mutate the specified +// Request and react to the received Response. +type Policy = exported.Policy + +// Transporter represents an HTTP pipeline transport used to send HTTP requests and receive responses. +type Transporter = exported.Transporter + +// Request is an abstraction over the creation of an HTTP request as it passes through the pipeline. +// Don't use this type directly, use runtime.NewRequest() instead. +type Request = exported.Request + +// ClientOptions contains optional settings for a client's pipeline. +// All zero-value fields will be initialized with default values. +type ClientOptions struct { + // APIVersion overrides the default version requested of the service. Set with caution as this package version has not been tested with arbitrary service versions. + APIVersion string + + // Cloud specifies a cloud for the client. The default is Azure Public Cloud. + Cloud cloud.Configuration + + // Logging configures the built-in logging policy. + Logging LogOptions + + // Retry configures the built-in retry policy. + Retry RetryOptions + + // Telemetry configures the built-in telemetry policy. + Telemetry TelemetryOptions + + // TracingProvider configures the tracing provider. + // It defaults to a no-op tracer. + TracingProvider tracing.Provider + + // Transport sets the transport for HTTP requests. + Transport Transporter + + // PerCallPolicies contains custom policies to inject into the pipeline. + // Each policy is executed once per request. + PerCallPolicies []Policy + + // PerRetryPolicies contains custom policies to inject into the pipeline. + // Each policy is executed once per request, and for each retry of that request. + PerRetryPolicies []Policy +} + +// LogOptions configures the logging policy's behavior. +type LogOptions struct { + // IncludeBody indicates if request and response bodies should be included in logging. + // The default value is false. + // NOTE: enabling this can lead to disclosure of sensitive information, use with care. + IncludeBody bool + + // AllowedHeaders is the slice of headers to log with their values intact. + // All headers not in the slice will have their values REDACTED. + // Applies to request and response headers. + AllowedHeaders []string + + // AllowedQueryParams is the slice of query parameters to log with their values intact. + // All query parameters not in the slice will have their values REDACTED. + AllowedQueryParams []string +} + +// RetryOptions configures the retry policy's behavior. +// Zero-value fields will have their specified default values applied during use. +// This allows for modification of a subset of fields. +type RetryOptions struct { + // MaxRetries specifies the maximum number of attempts a failed operation will be retried + // before producing an error. 
+	// The default value is three. A value less than zero means one try and no retries.
+	MaxRetries int32
+
+	// TryTimeout indicates the maximum time allowed for any single try of an HTTP request.
+	// This is disabled by default. Specify a value greater than zero to enable.
+	// NOTE: Setting this to a small value might cause premature HTTP request time-outs.
+	TryTimeout time.Duration
+
+	// RetryDelay specifies the initial amount of delay to use before retrying an operation.
+	// The value is used only if the HTTP response does not contain a Retry-After header.
+	// The delay increases exponentially with each retry up to the maximum specified by MaxRetryDelay.
+	// The default value is four seconds. A value less than zero means no delay between retries.
+	RetryDelay time.Duration
+
+	// MaxRetryDelay specifies the maximum delay allowed before retrying an operation.
+	// Typically the value is greater than or equal to the value specified in RetryDelay.
+	// The default value is 60 seconds. A value less than zero means there is no cap.
+	MaxRetryDelay time.Duration
+
+	// StatusCodes specifies the HTTP status codes that indicate the operation should be retried.
+	// A nil slice will use the following values.
+	//   http.StatusRequestTimeout      408
+	//   http.StatusTooManyRequests     429
+	//   http.StatusInternalServerError 500
+	//   http.StatusBadGateway          502
+	//   http.StatusServiceUnavailable  503
+	//   http.StatusGatewayTimeout      504
+	// Specifying values will replace the default values.
+	// Specifying an empty slice will disable retries for HTTP status codes.
+	StatusCodes []int
+
+	// ShouldRetry evaluates if the retry policy should retry the request.
+	// When specified, the function overrides comparison against the list of
+	// HTTP status codes and error checking within the retry policy. Context
+	// and NonRetriable errors remain evaluated before calling ShouldRetry.
+	// The *http.Response and error parameters are mutually exclusive, i.e.
+	// if one is nil, the other is not nil.
+	// A return value of true means the retry policy should retry.
+	ShouldRetry func(*http.Response, error) bool
+}
+
+// TelemetryOptions configures the telemetry policy's behavior.
+type TelemetryOptions struct {
+	// ApplicationID is an application-specific identification string to add to the User-Agent.
+	// It has a maximum length of 24 characters and must not contain any spaces.
+	ApplicationID string
+
+	// Disabled will prevent the addition of any telemetry data to the User-Agent.
+	Disabled bool
+}
+
+// TokenRequestOptions contains parameters that may be used by credential types when attempting to get a token.
+type TokenRequestOptions = exported.TokenRequestOptions
+
+// BearerTokenOptions configures the bearer token policy's behavior.
+type BearerTokenOptions struct {
+	// AuthorizationHandler allows SDK developers to run client-specific logic when BearerTokenPolicy must authorize a request.
+	// When this field isn't set, the policy follows its default behavior of authorizing every request with a bearer token from
+	// its given credential.
+	AuthorizationHandler AuthorizationHandler
+}
+
+// AuthorizationHandler allows SDK developers to insert custom logic that runs when BearerTokenPolicy must authorize a request.
+type AuthorizationHandler struct {
+	// OnRequest is called each time the policy receives a request. Its func parameter authorizes the request with a token
+	// from the policy's given credential.
Implementations that need to perform I/O should use the Request's context, + // available from Request.Raw().Context(). When OnRequest returns an error, the policy propagates that error and doesn't + // send the request. When OnRequest is nil, the policy follows its default behavior, authorizing the request with a + // token from its credential according to its configuration. + OnRequest func(*Request, func(TokenRequestOptions) error) error + + // OnChallenge is called when the policy receives a 401 response, allowing the AuthorizationHandler to re-authorize the + // request according to an authentication challenge (the Response's WWW-Authenticate header). OnChallenge is responsible + // for parsing parameters from the challenge. Its func parameter will authorize the request with a token from the policy's + // given credential. Implementations that need to perform I/O should use the Request's context, available from + // Request.Raw().Context(). When OnChallenge returns nil, the policy will send the request again. When OnChallenge is nil, + // the policy will return any 401 response to the client. + OnChallenge func(*Request, *http.Response, func(TokenRequestOptions) error) error +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go new file mode 100644 index 00000000..c9cfa438 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go @@ -0,0 +1,10 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +// Package runtime contains various facilities for creating requests and handling responses. +// The content is intended for SDK authors. +package runtime diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go new file mode 100644 index 00000000..6d03b291 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go @@ -0,0 +1,19 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" +) + +// NewResponseError creates an *azcore.ResponseError from the provided HTTP response. +// Call this when a service request returns a non-successful status code. +func NewResponseError(resp *http.Response) error { + return exported.NewResponseError(resp) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go new file mode 100644 index 00000000..5507665d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go @@ -0,0 +1,77 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "encoding/json" + "errors" +) + +// PagingHandler contains the required data for constructing a Pager. +type PagingHandler[T any] struct { + // More returns a boolean indicating if there are more pages to fetch. + // It uses the provided page to make the determination. + More func(T) bool + + // Fetcher fetches the first and subsequent pages. 
+ Fetcher func(context.Context, *T) (T, error) +} + +// Pager provides operations for iterating over paged responses. +type Pager[T any] struct { + current *T + handler PagingHandler[T] + firstPage bool +} + +// NewPager creates an instance of Pager using the specified PagingHandler. +// Pass a non-nil T for firstPage if the first page has already been retrieved. +func NewPager[T any](handler PagingHandler[T]) *Pager[T] { + return &Pager[T]{ + handler: handler, + firstPage: true, + } +} + +// More returns true if there are more pages to retrieve. +func (p *Pager[T]) More() bool { + if p.current != nil { + return p.handler.More(*p.current) + } + return true +} + +// NextPage advances the pager to the next page. +func (p *Pager[T]) NextPage(ctx context.Context) (T, error) { + var resp T + var err error + if p.current != nil { + if p.firstPage { + // we get here if it's an LRO-pager, we already have the first page + p.firstPage = false + return *p.current, nil + } else if !p.handler.More(*p.current) { + return *new(T), errors.New("no more pages") + } + resp, err = p.handler.Fetcher(ctx, p.current) + } else { + // non-LRO case, first page + p.firstPage = false + resp, err = p.handler.Fetcher(ctx, nil) + } + if err != nil { + return *new(T), err + } + p.current = &resp + return *p.current, nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface for Pager[T]. +func (p *Pager[T]) UnmarshalJSON(data []byte) error { + return json.Unmarshal(data, &p.current) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go new file mode 100644 index 00000000..9d9288f5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go @@ -0,0 +1,66 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// PipelineOptions contains Pipeline options for SDK developers +type PipelineOptions struct { + AllowedHeaders, AllowedQueryParameters []string + APIVersion APIVersionOptions + PerCall, PerRetry []policy.Policy +} + +// Pipeline represents a primitive for sending HTTP requests and receiving responses. +// Its behavior can be extended by specifying policies during construction. +type Pipeline = exported.Pipeline + +// NewPipeline creates a pipeline from connection options, with any additional policies as specified. +// Policies from ClientOptions are placed after policies from PipelineOptions. +// The module and version parameters are used by the telemetry policy, when enabled. +func NewPipeline(module, version string, plOpts PipelineOptions, options *policy.ClientOptions) Pipeline { + cp := policy.ClientOptions{} + if options != nil { + cp = *options + } + if len(plOpts.AllowedHeaders) > 0 { + headers := make([]string, len(plOpts.AllowedHeaders)+len(cp.Logging.AllowedHeaders)) + copy(headers, plOpts.AllowedHeaders) + headers = append(headers, cp.Logging.AllowedHeaders...) + cp.Logging.AllowedHeaders = headers + } + if len(plOpts.AllowedQueryParameters) > 0 { + qp := make([]string, len(plOpts.AllowedQueryParameters)+len(cp.Logging.AllowedQueryParams)) + copy(qp, plOpts.AllowedQueryParameters) + qp = append(qp, cp.Logging.AllowedQueryParams...) 
+ cp.Logging.AllowedQueryParams = qp + } + // we put the includeResponsePolicy at the very beginning so that the raw response + // is populated with the final response (some policies might mutate the response) + policies := []policy.Policy{exported.PolicyFunc(includeResponsePolicy)} + if cp.APIVersion != "" { + policies = append(policies, newAPIVersionPolicy(cp.APIVersion, &plOpts.APIVersion)) + } + if !cp.Telemetry.Disabled { + policies = append(policies, NewTelemetryPolicy(module, version, &cp.Telemetry)) + } + policies = append(policies, plOpts.PerCall...) + policies = append(policies, cp.PerCallPolicies...) + policies = append(policies, NewRetryPolicy(&cp.Retry)) + policies = append(policies, plOpts.PerRetry...) + policies = append(policies, cp.PerRetryPolicies...) + policies = append(policies, NewLogPolicy(&cp.Logging)) + policies = append(policies, exported.PolicyFunc(httpHeaderPolicy), exported.PolicyFunc(bodyDownloadPolicy)) + transport := cp.Transport + if transport == nil { + transport = defaultHTTPClient + } + return exported.NewPipeline(transport, policies...) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go new file mode 100644 index 00000000..e5309aa6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go @@ -0,0 +1,75 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "errors" + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// APIVersionOptions contains options for API versions +type APIVersionOptions struct { + // Location indicates where to set the version on a request, for example in a header or query param + Location APIVersionLocation + // Name is the name of the header or query parameter, for example "api-version" + Name string +} + +// APIVersionLocation indicates which part of a request identifies the service version +type APIVersionLocation int + +const ( + // APIVersionLocationQueryParam indicates a query parameter + APIVersionLocationQueryParam = 0 + // APIVersionLocationHeader indicates a header + APIVersionLocationHeader = 1 +) + +// newAPIVersionPolicy constructs an APIVersionPolicy. If version is "", Do will be a no-op. If version +// isn't empty and opts.Name is empty, Do will return an error. +func newAPIVersionPolicy(version string, opts *APIVersionOptions) *apiVersionPolicy { + if opts == nil { + opts = &APIVersionOptions{} + } + return &apiVersionPolicy{location: opts.Location, name: opts.Name, version: version} +} + +// apiVersionPolicy enables users to set the API version of every request a client sends. +type apiVersionPolicy struct { + // location indicates whether "name" refers to a query parameter or header. + location APIVersionLocation + + // name of the query param or header whose value should be overridden; provided by the client. + name string + + // version is the value (provided by the user) that replaces the default version value. + version string +} + +// Do sets the request's API version, if the policy is configured to do so, replacing any prior value. 
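// Editorial sketch (not part of the vendored source): a client constructor that supports overriding
// its API version passes the parameter's name and location through PipelineOptions; the module name,
// version, parameter name, and date value below are placeholder assumptions.
//
//	plOpts := PipelineOptions{
//		APIVersion: APIVersionOptions{
//			Location: APIVersionLocationQueryParam,
//			Name:     "api-version",
//		},
//	}
//	pl := NewPipeline("example-module", "v0.1.0", plOpts, &policy.ClientOptions{APIVersion: "2023-01-01"})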
+func (a *apiVersionPolicy) Do(req *policy.Request) (*http.Response, error) { + if a.version != "" { + if a.name == "" { + // user set ClientOptions.APIVersion but the client ctor didn't set PipelineOptions.APIVersionOptions + return nil, errors.New("this client doesn't support overriding its API version") + } + switch a.location { + case APIVersionLocationHeader: + req.Raw().Header.Set(a.name, a.version) + case APIVersionLocationQueryParam: + q := req.Raw().URL.Query() + q.Set(a.name, a.version) + req.Raw().URL.RawQuery = q.Encode() + default: + return nil, fmt.Errorf("unknown APIVersionLocation %d", a.location) + } + } + return req.Next() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go new file mode 100644 index 00000000..b61e4c12 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go @@ -0,0 +1,116 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "errors" + "net/http" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" + "github.com/Azure/azure-sdk-for-go/sdk/internal/temporal" +) + +// BearerTokenPolicy authorizes requests with bearer tokens acquired from a TokenCredential. +type BearerTokenPolicy struct { + // mainResource is the resource to be retreived using the tenant specified in the credential + mainResource *temporal.Resource[exported.AccessToken, acquiringResourceState] + // the following fields are read-only + authzHandler policy.AuthorizationHandler + cred exported.TokenCredential + scopes []string +} + +type acquiringResourceState struct { + req *policy.Request + p *BearerTokenPolicy + tro policy.TokenRequestOptions +} + +// acquire acquires or updates the resource; only one +// thread/goroutine at a time ever calls this function +func acquire(state acquiringResourceState) (newResource exported.AccessToken, newExpiration time.Time, err error) { + tk, err := state.p.cred.GetToken(state.req.Raw().Context(), state.tro) + if err != nil { + return exported.AccessToken{}, time.Time{}, err + } + return tk, tk.ExpiresOn, nil +} + +// NewBearerTokenPolicy creates a policy object that authorizes requests with bearer tokens. +// cred: an azcore.TokenCredential implementation such as a credential object from azidentity +// scopes: the list of permission scopes required for the token. +// opts: optional settings. Pass nil to accept default values; this is the same as passing a zero-value options. 
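// Editorial sketch (not part of the vendored source): constructing the policy with a credential from
// the azidentity module, which this change also vendors; the scope string is an example value.
//
//	cred, err := azidentity.NewDefaultAzureCredential(nil)
//	if err != nil {
//		// handle error
//	}
//	btp := NewBearerTokenPolicy(cred, []string{"https://management.azure.com/.default"}, nil)
//	// clients typically supply the policy via ClientOptions.PerRetryPolicies so it runs on every try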
+func NewBearerTokenPolicy(cred exported.TokenCredential, scopes []string, opts *policy.BearerTokenOptions) *BearerTokenPolicy { + if opts == nil { + opts = &policy.BearerTokenOptions{} + } + return &BearerTokenPolicy{ + authzHandler: opts.AuthorizationHandler, + cred: cred, + scopes: scopes, + mainResource: temporal.NewResource(acquire), + } +} + +// authenticateAndAuthorize returns a function which authorizes req with a token from the policy's credential +func (b *BearerTokenPolicy) authenticateAndAuthorize(req *policy.Request) func(policy.TokenRequestOptions) error { + return func(tro policy.TokenRequestOptions) error { + as := acquiringResourceState{p: b, req: req, tro: tro} + tk, err := b.mainResource.Get(as) + if err != nil { + return err + } + req.Raw().Header.Set(shared.HeaderAuthorization, shared.BearerTokenPrefix+tk.Token) + return nil + } +} + +// Do authorizes a request with a bearer token +func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) { + var err error + if b.authzHandler.OnRequest != nil { + err = b.authzHandler.OnRequest(req, b.authenticateAndAuthorize(req)) + } else { + err = b.authenticateAndAuthorize(req)(policy.TokenRequestOptions{Scopes: b.scopes}) + } + if err != nil { + return nil, ensureNonRetriable(err) + } + + res, err := req.Next() + if err != nil { + return nil, err + } + + if res.StatusCode == http.StatusUnauthorized { + b.mainResource.Expire() + if res.Header.Get("WWW-Authenticate") != "" && b.authzHandler.OnChallenge != nil { + if err = b.authzHandler.OnChallenge(req, res, b.authenticateAndAuthorize(req)); err == nil { + res, err = req.Next() + } + } + } + return res, ensureNonRetriable(err) +} + +func ensureNonRetriable(err error) error { + var nre errorinfo.NonRetriable + if err != nil && !errors.As(err, &nre) { + err = btpError{err} + } + return err +} + +// btpError is a wrapper that ensures RetryPolicy doesn't retry requests BearerTokenPolicy couldn't authorize +type btpError struct { + error +} + +func (btpError) NonRetriable() {} + +var _ errorinfo.NonRetriable = (*btpError)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go new file mode 100644 index 00000000..99dc029f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go @@ -0,0 +1,72 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "fmt" + "net/http" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" +) + +// bodyDownloadPolicy creates a policy object that downloads the response's body to a []byte. 
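// Editorial note (not part of the vendored source): operations that stream the response body
// (for example, large downloads) opt out of this behavior by calling SkipBodyDownload(req)
// before sending the request, which sets the Skip flag consumed below.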
+func bodyDownloadPolicy(req *policy.Request) (*http.Response, error) { + resp, err := req.Next() + if err != nil { + return resp, err + } + var opValues bodyDownloadPolicyOpValues + // don't skip downloading error response bodies + if req.OperationValue(&opValues); opValues.Skip && resp.StatusCode < 400 { + return resp, err + } + // Either bodyDownloadPolicyOpValues was not specified (so skip is false) + // or it was specified and skip is false: don't skip downloading the body + _, err = Payload(resp) + if err != nil { + return resp, newBodyDownloadError(err, req) + } + return resp, err +} + +// bodyDownloadPolicyOpValues is the struct containing the per-operation values +type bodyDownloadPolicyOpValues struct { + Skip bool +} + +type bodyDownloadError struct { + err error +} + +func newBodyDownloadError(err error, req *policy.Request) error { + // on failure, only retry the request for idempotent operations. + // we currently identify them as DELETE, GET, and PUT requests. + if m := strings.ToUpper(req.Raw().Method); m == http.MethodDelete || m == http.MethodGet || m == http.MethodPut { + // error is safe for retry + return err + } + // wrap error to avoid retries + return &bodyDownloadError{ + err: err, + } +} + +func (b *bodyDownloadError) Error() string { + return fmt.Sprintf("body download policy: %s", b.err.Error()) +} + +func (b *bodyDownloadError) NonRetriable() { + // marker method +} + +func (b *bodyDownloadError) Unwrap() error { + return b.err +} + +var _ errorinfo.NonRetriable = (*bodyDownloadError)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go new file mode 100644 index 00000000..770e0a2b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go @@ -0,0 +1,39 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// newHTTPHeaderPolicy creates a policy object that adds custom HTTP headers to a request +func httpHeaderPolicy(req *policy.Request) (*http.Response, error) { + // check if any custom HTTP headers have been specified + if header := req.Raw().Context().Value(shared.CtxWithHTTPHeaderKey{}); header != nil { + for k, v := range header.(http.Header) { + // use Set to replace any existing value + // it also canonicalizes the header key + req.Raw().Header.Set(k, v[0]) + // add any remaining values + for i := 1; i < len(v); i++ { + req.Raw().Header.Add(k, v[i]) + } + } + } + return req.Next() +} + +// WithHTTPHeader adds the specified http.Header to the parent context. +// Use this to specify custom HTTP headers at the API-call level. +// Any overlapping headers will have their values replaced with the values specified here. 
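// Editorial sketch (not part of the vendored source); the header name and value are placeholders:
//
//	hdr := http.Header{}
//	hdr.Set("x-example-header", "value")
//	ctx := WithHTTPHeader(context.Background(), hdr)
//	// pass ctx to the API call whose underlying request should carry the extra header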
+func WithHTTPHeader(parent context.Context, header http.Header) context.Context { + return context.WithValue(parent, shared.CtxWithHTTPHeaderKey{}, header) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go new file mode 100644 index 00000000..4714baa3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go @@ -0,0 +1,34 @@ +//go:build go1.16 +// +build go1.16 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// includeResponsePolicy creates a policy that retrieves the raw HTTP response upon request +func includeResponsePolicy(req *policy.Request) (*http.Response, error) { + resp, err := req.Next() + if resp == nil { + return resp, err + } + if httpOutRaw := req.Raw().Context().Value(shared.CtxIncludeResponseKey{}); httpOutRaw != nil { + httpOut := httpOutRaw.(**http.Response) + *httpOut = resp + } + return resp, err +} + +// WithCaptureResponse applies the HTTP response retrieval annotation to the parent context. +// The resp parameter will contain the HTTP response after the request has completed. +func WithCaptureResponse(parent context.Context, resp **http.Response) context.Context { + return context.WithValue(parent, shared.CtxIncludeResponseKey{}, resp) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go new file mode 100644 index 00000000..8514f57d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go @@ -0,0 +1,263 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "bytes" + "fmt" + "io" + "net/http" + "net/url" + "sort" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/diag" +) + +type logPolicy struct { + includeBody bool + allowedHeaders map[string]struct{} + allowedQP map[string]struct{} +} + +// NewLogPolicy creates a request/response logging policy object configured using the specified options. +// Pass nil to accept the default values; this is the same as passing a zero-value options. 
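// Editorial sketch (not part of the vendored source): allowing additional names through redaction;
// the header and query parameter names are placeholders. Log output is emitted through the listener
// registered with the azcore log package.
//
//	lp := NewLogPolicy(&policy.LogOptions{
//		AllowedHeaders:     []string{"x-example-header"},
//		AllowedQueryParams: []string{"example-param"},
//	})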
+func NewLogPolicy(o *policy.LogOptions) policy.Policy { + if o == nil { + o = &policy.LogOptions{} + } + // construct default hash set of allowed headers + allowedHeaders := map[string]struct{}{ + "accept": {}, + "cache-control": {}, + "connection": {}, + "content-length": {}, + "content-type": {}, + "date": {}, + "etag": {}, + "expires": {}, + "if-match": {}, + "if-modified-since": {}, + "if-none-match": {}, + "if-unmodified-since": {}, + "last-modified": {}, + "ms-cv": {}, + "pragma": {}, + "request-id": {}, + "retry-after": {}, + "server": {}, + "traceparent": {}, + "transfer-encoding": {}, + "user-agent": {}, + "www-authenticate": {}, + "x-ms-request-id": {}, + "x-ms-client-request-id": {}, + "x-ms-return-client-request-id": {}, + } + // add any caller-specified allowed headers to the set + for _, ah := range o.AllowedHeaders { + allowedHeaders[strings.ToLower(ah)] = struct{}{} + } + // now do the same thing for query params + allowedQP := getAllowedQueryParams(o.AllowedQueryParams) + return &logPolicy{ + includeBody: o.IncludeBody, + allowedHeaders: allowedHeaders, + allowedQP: allowedQP, + } +} + +// getAllowedQueryParams merges the default set of allowed query parameters +// with a custom set (usually comes from client options). +func getAllowedQueryParams(customAllowedQP []string) map[string]struct{} { + allowedQP := map[string]struct{}{ + "api-version": {}, + } + for _, qp := range customAllowedQP { + allowedQP[strings.ToLower(qp)] = struct{}{} + } + return allowedQP +} + +// logPolicyOpValues is the struct containing the per-operation values +type logPolicyOpValues struct { + try int32 + start time.Time +} + +func (p *logPolicy) Do(req *policy.Request) (*http.Response, error) { + // Get the per-operation values. These are saved in the Message's map so that they persist across each retry calling into this policy object. + var opValues logPolicyOpValues + if req.OperationValue(&opValues); opValues.start.IsZero() { + opValues.start = time.Now() // If this is the 1st try, record this operation's start time + } + opValues.try++ // The first try is #1 (not #0) + req.SetOperationValue(opValues) + + // Log the outgoing request as informational + if log.Should(log.EventRequest) { + b := &bytes.Buffer{} + fmt.Fprintf(b, "==> OUTGOING REQUEST (Try=%d)\n", opValues.try) + p.writeRequestWithResponse(b, req, nil, nil) + var err error + if p.includeBody { + err = writeReqBody(req, b) + } + log.Write(log.EventRequest, b.String()) + if err != nil { + return nil, err + } + } + + // Set the time for this particular retry operation and then Do the operation. 
+ tryStart := time.Now() + response, err := req.Next() // Make the request + tryEnd := time.Now() + tryDuration := tryEnd.Sub(tryStart) + opDuration := tryEnd.Sub(opValues.start) + + if log.Should(log.EventResponse) { + // We're going to log this; build the string to log + b := &bytes.Buffer{} + fmt.Fprintf(b, "==> REQUEST/RESPONSE (Try=%d/%v, OpTime=%v) -- ", opValues.try, tryDuration, opDuration) + if err != nil { // This HTTP request did not get a response from the service + fmt.Fprint(b, "REQUEST ERROR\n") + } else { + fmt.Fprint(b, "RESPONSE RECEIVED\n") + } + + p.writeRequestWithResponse(b, req, response, err) + if err != nil { + // skip frames runtime.Callers() and runtime.StackTrace() + b.WriteString(diag.StackTrace(2, 32)) + } else if p.includeBody { + err = writeRespBody(response, b) + } + log.Write(log.EventResponse, b.String()) + } + return response, err +} + +const redactedValue = "REDACTED" + +// getSanitizedURL returns a sanitized string for the provided url.URL +func getSanitizedURL(u url.URL, allowedQueryParams map[string]struct{}) string { + // redact applicable query params + qp := u.Query() + for k := range qp { + if _, ok := allowedQueryParams[strings.ToLower(k)]; !ok { + qp.Set(k, redactedValue) + } + } + u.RawQuery = qp.Encode() + return u.String() +} + +// writeRequestWithResponse appends a formatted HTTP request into a Buffer. If request and/or err are +// not nil, then these are also written into the Buffer. +func (p *logPolicy) writeRequestWithResponse(b *bytes.Buffer, req *policy.Request, resp *http.Response, err error) { + // Write the request into the buffer. + fmt.Fprint(b, " "+req.Raw().Method+" "+getSanitizedURL(*req.Raw().URL, p.allowedQP)+"\n") + p.writeHeader(b, req.Raw().Header) + if resp != nil { + fmt.Fprintln(b, " --------------------------------------------------------------------------------") + fmt.Fprint(b, " RESPONSE Status: "+resp.Status+"\n") + p.writeHeader(b, resp.Header) + } + if err != nil { + fmt.Fprintln(b, " --------------------------------------------------------------------------------") + fmt.Fprint(b, " ERROR:\n"+err.Error()+"\n") + } +} + +// formatHeaders appends an HTTP request's or response's header into a Buffer. +func (p *logPolicy) writeHeader(b *bytes.Buffer, header http.Header) { + if len(header) == 0 { + b.WriteString(" (no headers)\n") + return + } + keys := make([]string, 0, len(header)) + // Alphabetize the headers + for k := range header { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + value := header.Get(k) + // redact all header values not in the allow-list + if _, ok := p.allowedHeaders[strings.ToLower(k)]; !ok { + value = redactedValue + } + fmt.Fprintf(b, " %s: %+v\n", k, value) + } +} + +// returns true if the request/response body should be logged. +// this is determined by looking at the content-type header value. 
+func shouldLogBody(b *bytes.Buffer, contentType string) bool { + contentType = strings.ToLower(contentType) + if strings.HasPrefix(contentType, "text") || + strings.Contains(contentType, "json") || + strings.Contains(contentType, "xml") { + return true + } + fmt.Fprintf(b, " Skip logging body for %s\n", contentType) + return false +} + +// writes to a buffer, used for logging purposes +func writeReqBody(req *policy.Request, b *bytes.Buffer) error { + if req.Raw().Body == nil { + fmt.Fprint(b, " Request contained no body\n") + return nil + } + if ct := req.Raw().Header.Get(shared.HeaderContentType); !shouldLogBody(b, ct) { + return nil + } + body, err := io.ReadAll(req.Raw().Body) + if err != nil { + fmt.Fprintf(b, " Failed to read request body: %s\n", err.Error()) + return err + } + if err := req.RewindBody(); err != nil { + return err + } + logBody(b, body) + return nil +} + +// writes to a buffer, used for logging purposes +func writeRespBody(resp *http.Response, b *bytes.Buffer) error { + ct := resp.Header.Get(shared.HeaderContentType) + if ct == "" { + fmt.Fprint(b, " Response contained no body\n") + return nil + } else if !shouldLogBody(b, ct) { + return nil + } + body, err := Payload(resp) + if err != nil { + fmt.Fprintf(b, " Failed to read response body: %s\n", err.Error()) + return err + } + if len(body) > 0 { + logBody(b, body) + } else { + fmt.Fprint(b, " Response contained no body\n") + } + return nil +} + +func logBody(b *bytes.Buffer, body []byte) { + fmt.Fprintln(b, " --------------------------------------------------------------------------------") + fmt.Fprintln(b, string(body)) + fmt.Fprintln(b, " --------------------------------------------------------------------------------") +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go new file mode 100644 index 00000000..360a7f21 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go @@ -0,0 +1,34 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" +) + +type requestIDPolicy struct{} + +// NewRequestIDPolicy returns a policy that add the x-ms-client-request-id header +func NewRequestIDPolicy() policy.Policy { + return &requestIDPolicy{} +} + +func (r *requestIDPolicy) Do(req *policy.Request) (*http.Response, error) { + if req.Raw().Header.Get(shared.HeaderXMSClientRequestID) == "" { + id, err := uuid.New() + if err != nil { + return nil, err + } + req.Raw().Header.Set(shared.HeaderXMSClientRequestID, id.String()) + } + + return req.Next() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go new file mode 100644 index 00000000..e0c5929f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go @@ -0,0 +1,262 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package runtime + +import ( + "context" + "errors" + "io" + "math" + "math/rand" + "net/http" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" + "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" +) + +const ( + defaultMaxRetries = 3 +) + +func setDefaults(o *policy.RetryOptions) { + if o.MaxRetries == 0 { + o.MaxRetries = defaultMaxRetries + } else if o.MaxRetries < 0 { + o.MaxRetries = 0 + } + + // SDK guidelines specify the default MaxRetryDelay is 60 seconds + if o.MaxRetryDelay == 0 { + o.MaxRetryDelay = 60 * time.Second + } else if o.MaxRetryDelay < 0 { + // not really an unlimited cap, but sufficiently large enough to be considered as such + o.MaxRetryDelay = math.MaxInt64 + } + if o.RetryDelay == 0 { + o.RetryDelay = 800 * time.Millisecond + } else if o.RetryDelay < 0 { + o.RetryDelay = 0 + } + if o.StatusCodes == nil { + // NOTE: if you change this list, you MUST update the docs in policy/policy.go + o.StatusCodes = []int{ + http.StatusRequestTimeout, // 408 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + } + } +} + +func calcDelay(o policy.RetryOptions, try int32) time.Duration { // try is >=1; never 0 + pow := func(number int64, exponent int32) int64 { // pow is nested helper function + var result int64 = 1 + for n := int32(0); n < exponent; n++ { + result *= number + } + return result + } + + delay := time.Duration(pow(2, try)-1) * o.RetryDelay + + // Introduce some jitter: [0.0, 1.0) / 2 = [0.0, 0.5) + 0.8 = [0.8, 1.3) + delay = time.Duration(delay.Seconds() * (rand.Float64()/2 + 0.8) * float64(time.Second)) // NOTE: We want math/rand; not crypto/rand + if delay > o.MaxRetryDelay { + delay = o.MaxRetryDelay + } + return delay +} + +// NewRetryPolicy creates a policy object configured using the specified options. +// Pass nil to accept the default values; this is the same as passing a zero-value options. +func NewRetryPolicy(o *policy.RetryOptions) policy.Policy { + if o == nil { + o = &policy.RetryOptions{} + } + p := &retryPolicy{options: *o} + return p +} + +type retryPolicy struct { + options policy.RetryOptions +} + +func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) { + options := p.options + // check if the retry options have been overridden for this call + if override := req.Raw().Context().Value(shared.CtxWithRetryOptionsKey{}); override != nil { + options = override.(policy.RetryOptions) + } + setDefaults(&options) + // Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.2) + // When to retry: connection failure or temporary/timeout. + var rwbody *retryableRequestBody + if req.Body() != nil { + // wrap the body so we control when it's actually closed. + // do this outside the for loop so defers don't accumulate. + rwbody = &retryableRequestBody{body: req.Body()} + defer rwbody.realClose() + } + try := int32(1) + for { + resp = nil // reset + log.Writef(log.EventRetryPolicy, "=====> Try=%d", try) + + // For each try, seek to the beginning of the Body stream. We do this even for the 1st try because + // the stream may not be at offset 0 when we first get it and we want the same behavior for the + // 1st try as for additional tries. 
+ err = req.RewindBody() + if err != nil { + return + } + // RewindBody() restores Raw().Body to its original state, so set our rewindable after + if rwbody != nil { + req.Raw().Body = rwbody + } + + if options.TryTimeout == 0 { + clone := req.Clone(req.Raw().Context()) + resp, err = clone.Next() + } else { + // Set the per-try time for this particular retry operation and then Do the operation. + tryCtx, tryCancel := context.WithTimeout(req.Raw().Context(), options.TryTimeout) + clone := req.Clone(tryCtx) + resp, err = clone.Next() // Make the request + // if the body was already downloaded or there was an error it's safe to cancel the context now + if err != nil { + tryCancel() + } else if exported.PayloadDownloaded(resp) { + tryCancel() + } else { + // must cancel the context after the body has been read and closed + resp.Body = &contextCancelReadCloser{cf: tryCancel, body: resp.Body} + } + } + if err == nil { + log.Writef(log.EventRetryPolicy, "response %d", resp.StatusCode) + } else { + log.Writef(log.EventRetryPolicy, "error %v", err) + } + + if ctxErr := req.Raw().Context().Err(); ctxErr != nil { + // don't retry if the parent context has been cancelled or its deadline exceeded + err = ctxErr + log.Writef(log.EventRetryPolicy, "abort due to %v", err) + return + } + + // check if the error is not retriable + var nre errorinfo.NonRetriable + if errors.As(err, &nre) { + // the error says it's not retriable so don't retry + log.Writef(log.EventRetryPolicy, "non-retriable error %T", nre) + return + } + + if options.ShouldRetry != nil { + // a non-nil ShouldRetry overrides our HTTP status code check + if !options.ShouldRetry(resp, err) { + // predicate says we shouldn't retry + log.Write(log.EventRetryPolicy, "exit due to ShouldRetry") + return + } + } else if err == nil && !HasStatusCode(resp, options.StatusCodes...) { + // if there is no error and the response code isn't in the list of retry codes then we're done. + log.Write(log.EventRetryPolicy, "exit due to non-retriable status code") + return + } + + if try == options.MaxRetries+1 { + // max number of tries has been reached, don't sleep again + log.Writef(log.EventRetryPolicy, "MaxRetries %d exceeded", options.MaxRetries) + return + } + + // use the delay from retry-after if available + delay := shared.RetryAfter(resp) + if delay <= 0 { + delay = calcDelay(options, try) + } else if delay > options.MaxRetryDelay { + // the retry-after delay exceeds the the cap so don't retry + log.Writef(log.EventRetryPolicy, "Retry-After delay %s exceeds MaxRetryDelay of %s", delay, options.MaxRetryDelay) + return + } + + // drain before retrying so nothing is leaked + Drain(resp) + + log.Writef(log.EventRetryPolicy, "End Try #%d, Delay=%v", try, delay) + select { + case <-time.After(delay): + try++ + case <-req.Raw().Context().Done(): + err = req.Raw().Context().Err() + log.Writef(log.EventRetryPolicy, "abort due to %v", err) + return + } + } +} + +// WithRetryOptions adds the specified RetryOptions to the parent context. +// Use this to specify custom RetryOptions at the API-call level. 
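// Editorial sketch (not part of the vendored source): overriding retry behavior for a single call.
//
//	ctx := WithRetryOptions(context.Background(), policy.RetryOptions{
//		MaxRetries: 5,
//		RetryDelay: 2 * time.Second,
//	})
//	// pass ctx to one API call to override the client-wide retry configuration for that call only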
+func WithRetryOptions(parent context.Context, options policy.RetryOptions) context.Context { + return context.WithValue(parent, shared.CtxWithRetryOptionsKey{}, options) +} + +// ********** The following type/methods implement the retryableRequestBody (a ReadSeekCloser) + +// This struct is used when sending a body to the network +type retryableRequestBody struct { + body io.ReadSeeker // Seeking is required to support retries +} + +// Read reads a block of data from an inner stream and reports progress +func (b *retryableRequestBody) Read(p []byte) (n int, err error) { + return b.body.Read(p) +} + +func (b *retryableRequestBody) Seek(offset int64, whence int) (offsetFromStart int64, err error) { + return b.body.Seek(offset, whence) +} + +func (b *retryableRequestBody) Close() error { + // We don't want the underlying transport to close the request body on transient failures so this is a nop. + // The retry policy closes the request body upon success. + return nil +} + +func (b *retryableRequestBody) realClose() error { + if c, ok := b.body.(io.Closer); ok { + return c.Close() + } + return nil +} + +// ********** The following type/methods implement the contextCancelReadCloser + +// contextCancelReadCloser combines an io.ReadCloser with a cancel func. +// it ensures the cancel func is invoked once the body has been read and closed. +type contextCancelReadCloser struct { + cf context.CancelFunc + body io.ReadCloser +} + +func (rc *contextCancelReadCloser) Read(p []byte) (n int, err error) { + return rc.body.Read(p) +} + +func (rc *contextCancelReadCloser) Close() error { + err := rc.body.Close() + rc.cf() + return err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go new file mode 100644 index 00000000..2abcdc57 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go @@ -0,0 +1,79 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "bytes" + "fmt" + "net/http" + "os" + "runtime" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +type telemetryPolicy struct { + telemetryValue string +} + +// NewTelemetryPolicy creates a telemetry policy object that adds telemetry information to outgoing HTTP requests. +// The format is [ ]azsdk-go-/ . +// Pass nil to accept the default values; this is the same as passing a zero-value options. 
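// Editorial sketch (not part of the vendored source); the module, version, and application ID are
// placeholders. With these inputs the policy produces a User-Agent value of roughly
// "example-app azsdk-go-example-module/v0.1.0 (<Go version>; <OS>)".
//
//	tp := NewTelemetryPolicy("example-module", "v0.1.0", &policy.TelemetryOptions{ApplicationID: "example-app"})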
+func NewTelemetryPolicy(mod, ver string, o *policy.TelemetryOptions) policy.Policy { + if o == nil { + o = &policy.TelemetryOptions{} + } + tp := telemetryPolicy{} + if o.Disabled { + return &tp + } + b := &bytes.Buffer{} + // normalize ApplicationID + if o.ApplicationID != "" { + o.ApplicationID = strings.ReplaceAll(o.ApplicationID, " ", "/") + if len(o.ApplicationID) > 24 { + o.ApplicationID = o.ApplicationID[:24] + } + b.WriteString(o.ApplicationID) + b.WriteRune(' ') + } + b.WriteString(formatTelemetry(mod, ver)) + b.WriteRune(' ') + b.WriteString(platformInfo) + tp.telemetryValue = b.String() + return &tp +} + +func formatTelemetry(comp, ver string) string { + return fmt.Sprintf("azsdk-go-%s/%s", comp, ver) +} + +func (p telemetryPolicy) Do(req *policy.Request) (*http.Response, error) { + if p.telemetryValue == "" { + return req.Next() + } + // preserve the existing User-Agent string + if ua := req.Raw().Header.Get(shared.HeaderUserAgent); ua != "" { + p.telemetryValue = fmt.Sprintf("%s %s", p.telemetryValue, ua) + } + req.Raw().Header.Set(shared.HeaderUserAgent, p.telemetryValue) + return req.Next() +} + +// NOTE: the ONLY function that should write to this variable is this func +var platformInfo = func() string { + operatingSystem := runtime.GOOS // Default OS string + switch operatingSystem { + case "windows": + operatingSystem = os.Getenv("OS") // Get more specific OS information + case "linux": // accept default OS info + case "freebsd": // accept default OS info + } + return fmt.Sprintf("(%s; %s)", runtime.Version(), operatingSystem) +}() diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go new file mode 100644 index 00000000..3d029a3d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go @@ -0,0 +1,327 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "encoding/json" + "errors" + "flag" + "fmt" + "net/http" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" +) + +// FinalStateVia is the enumerated type for the possible final-state-via values. +type FinalStateVia = pollers.FinalStateVia + +const ( + // FinalStateViaAzureAsyncOp indicates the final payload comes from the Azure-AsyncOperation URL. + FinalStateViaAzureAsyncOp = pollers.FinalStateViaAzureAsyncOp + + // FinalStateViaLocation indicates the final payload comes from the Location URL. + FinalStateViaLocation = pollers.FinalStateViaLocation + + // FinalStateViaOriginalURI indicates the final payload comes from the original URL. + FinalStateViaOriginalURI = pollers.FinalStateViaOriginalURI + + // FinalStateViaOpLocation indicates the final payload comes from the Operation-Location URL. 
+ FinalStateViaOpLocation = pollers.FinalStateViaOpLocation +) + +// NewPollerOptions contains the optional parameters for NewPoller. +type NewPollerOptions[T any] struct { + // FinalStateVia contains the final-state-via value for the LRO. + FinalStateVia FinalStateVia + + // Response contains a preconstructed response type. + // The final payload will be unmarshaled into it and returned. + Response *T + + // Handler[T] contains a custom polling implementation. + Handler PollingHandler[T] +} + +// NewPoller creates a Poller based on the provided initial response. +func NewPoller[T any](resp *http.Response, pl exported.Pipeline, options *NewPollerOptions[T]) (*Poller[T], error) { + if options == nil { + options = &NewPollerOptions[T]{} + } + result := options.Response + if result == nil { + result = new(T) + } + if options.Handler != nil { + return &Poller[T]{ + op: options.Handler, + resp: resp, + result: result, + }, nil + } + + defer resp.Body.Close() + // this is a back-stop in case the swagger is incorrect (i.e. missing one or more status codes for success). + // ideally the codegen should return an error if the initial response failed and not even create a poller. + if !poller.StatusCodeValid(resp) { + return nil, errors.New("the operation failed or was cancelled") + } + + // determine the polling method + var opr PollingHandler[T] + var err error + if async.Applicable(resp) { + // async poller must be checked first as it can also have a location header + opr, err = async.New[T](pl, resp, options.FinalStateVia) + } else if op.Applicable(resp) { + // op poller must be checked before loc as it can also have a location header + opr, err = op.New[T](pl, resp, options.FinalStateVia) + } else if loc.Applicable(resp) { + opr, err = loc.New[T](pl, resp) + } else if body.Applicable(resp) { + // must test body poller last as it's a subset of the other pollers. + // TODO: this is ambiguous for PATCH/PUT if it returns a 200 with no polling headers (sync completion) + opr, err = body.New[T](pl, resp) + } else if m := resp.Request.Method; resp.StatusCode == http.StatusAccepted && (m == http.MethodDelete || m == http.MethodPost) { + // if we get here it means we have a 202 with no polling headers. + // for DELETE and POST this is a hard error per ARM RPC spec. + return nil, errors.New("response is missing polling URL") + } else { + opr, err = pollers.NewNopPoller[T](resp) + } + + if err != nil { + return nil, err + } + return &Poller[T]{ + op: opr, + resp: resp, + result: result, + }, nil +} + +// NewPollerFromResumeTokenOptions contains the optional parameters for NewPollerFromResumeToken. +type NewPollerFromResumeTokenOptions[T any] struct { + // Response contains a preconstructed response type. + // The final payload will be unmarshaled into it and returned. + Response *T + + // Handler[T] contains a custom polling implementation. + Handler PollingHandler[T] +} + +// NewPollerFromResumeToken creates a Poller from a resume token string. 
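// Editorial sketch (not part of the vendored source): suspending an LRO and resuming it later,
// possibly in another process; ExampleResponse stands in for the operation's response type.
//
//	tk, err := poller.ResumeToken() // capture before the LRO reaches a terminal state
//	if err != nil {
//		// handle error
//	}
//	// persist tk, then later:
//	resumed, err := NewPollerFromResumeToken[ExampleResponse](tk, pl, nil)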
+func NewPollerFromResumeToken[T any](token string, pl exported.Pipeline, options *NewPollerFromResumeTokenOptions[T]) (*Poller[T], error) { + if options == nil { + options = &NewPollerFromResumeTokenOptions[T]{} + } + result := options.Response + if result == nil { + result = new(T) + } + + if err := pollers.IsTokenValid[T](token); err != nil { + return nil, err + } + raw, err := pollers.ExtractToken(token) + if err != nil { + return nil, err + } + var asJSON map[string]interface{} + if err := json.Unmarshal(raw, &asJSON); err != nil { + return nil, err + } + + opr := options.Handler + // now rehydrate the poller based on the encoded poller type + if opr != nil { + log.Writef(log.EventLRO, "Resuming custom poller %T.", opr) + } else if async.CanResume(asJSON) { + opr, _ = async.New[T](pl, nil, "") + } else if body.CanResume(asJSON) { + opr, _ = body.New[T](pl, nil) + } else if loc.CanResume(asJSON) { + opr, _ = loc.New[T](pl, nil) + } else if op.CanResume(asJSON) { + opr, _ = op.New[T](pl, nil, "") + } else { + return nil, fmt.Errorf("unhandled poller token %s", string(raw)) + } + if err := json.Unmarshal(raw, &opr); err != nil { + return nil, err + } + return &Poller[T]{ + op: opr, + result: result, + }, nil +} + +// PollingHandler[T] abstracts the differences among poller implementations. +type PollingHandler[T any] interface { + // Done returns true if the LRO has reached a terminal state. + Done() bool + + // Poll fetches the latest state of the LRO. + Poll(context.Context) (*http.Response, error) + + // Result is called once the LRO has reached a terminal state. It populates the out parameter + // with the result of the operation. + Result(ctx context.Context, out *T) error +} + +// Poller encapsulates a long-running operation, providing polling facilities until the operation reaches a terminal state. +type Poller[T any] struct { + op PollingHandler[T] + resp *http.Response + err error + result *T + done bool +} + +// PollUntilDoneOptions contains the optional values for the Poller[T].PollUntilDone() method. +type PollUntilDoneOptions struct { + // Frequency is the time to wait between polling intervals in absence of a Retry-After header. Allowed minimum is one second. + // Pass zero to accept the default value (30s). + Frequency time.Duration +} + +// PollUntilDone will poll the service endpoint until a terminal state is reached, an error is received, or the context expires. +// It internally uses Poll(), Done(), and Result() in its polling loop, sleeping for the specified duration between intervals. +// options: pass nil to accept the default values. +// NOTE: the default polling frequency is 30 seconds which works well for most operations. However, some operations might +// benefit from a shorter or longer duration. 
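// Editorial sketch (not part of the vendored source): polling at a custom ten-second interval.
//
//	res, err := poller.PollUntilDone(ctx, &PollUntilDoneOptions{Frequency: 10 * time.Second})
//	if err != nil {
//		// handle error
//	}
//	// res holds the operation's terminal payload of type T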
+func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOptions) (T, error) { + if options == nil { + options = &PollUntilDoneOptions{} + } + cp := *options + if cp.Frequency == 0 { + cp.Frequency = 30 * time.Second + } + + // skip the floor check when executing tests so they don't take so long + if isTest := flag.Lookup("test.v"); isTest == nil && cp.Frequency < time.Second { + return *new(T), errors.New("polling frequency minimum is one second") + } + + start := time.Now() + logPollUntilDoneExit := func(v interface{}) { + log.Writef(log.EventLRO, "END PollUntilDone() for %T: %v, total time: %s", p.op, v, time.Since(start)) + } + log.Writef(log.EventLRO, "BEGIN PollUntilDone() for %T", p.op) + if p.resp != nil { + // initial check for a retry-after header existing on the initial response + if retryAfter := shared.RetryAfter(p.resp); retryAfter > 0 { + log.Writef(log.EventLRO, "initial Retry-After delay for %s", retryAfter.String()) + if err := shared.Delay(ctx, retryAfter); err != nil { + logPollUntilDoneExit(err) + return *new(T), err + } + } + } + // begin polling the endpoint until a terminal state is reached + for { + resp, err := p.Poll(ctx) + if err != nil { + logPollUntilDoneExit(err) + return *new(T), err + } + if p.Done() { + logPollUntilDoneExit("succeeded") + return p.Result(ctx) + } + d := cp.Frequency + if retryAfter := shared.RetryAfter(resp); retryAfter > 0 { + log.Writef(log.EventLRO, "Retry-After delay for %s", retryAfter.String()) + d = retryAfter + } else { + log.Writef(log.EventLRO, "delay for %s", d.String()) + } + if err = shared.Delay(ctx, d); err != nil { + logPollUntilDoneExit(err) + return *new(T), err + } + } +} + +// Poll fetches the latest state of the LRO. It returns an HTTP response or error. +// If Poll succeeds, the poller's state is updated and the HTTP response is returned. +// If Poll fails, the poller's state is unmodified and the error is returned. +// Calling Poll on an LRO that has reached a terminal state will return the last HTTP response. +func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + if p.Done() { + // the LRO has reached a terminal state, don't poll again + return p.resp, nil + } + resp, err := p.op.Poll(ctx) + if err != nil { + return nil, err + } + p.resp = resp + return p.resp, nil +} + +// Done returns true if the LRO has reached a terminal state. +// Once a terminal state is reached, call Result(). +func (p *Poller[T]) Done() bool { + return p.op.Done() +} + +// Result returns the result of the LRO and is meant to be used in conjunction with Poll and Done. +// If the LRO completed successfully, a populated instance of T is returned. +// If the LRO failed or was canceled, an *azcore.ResponseError error is returned. +// Calling this on an LRO in a non-terminal state will return an error. +func (p *Poller[T]) Result(ctx context.Context) (T, error) { + if !p.Done() { + return *new(T), errors.New("poller is in a non-terminal state") + } + if p.done { + // the result has already been retrieved, return the cached value + if p.err != nil { + return *new(T), p.err + } + return *p.result, nil + } + err := p.op.Result(ctx, p.result) + var respErr *exported.ResponseError + if errors.As(err, &respErr) { + // the LRO failed. 
record the error + p.err = err + } else if err != nil { + // the call to Result failed, don't cache anything in this case + return *new(T), err + } + p.done = true + if p.err != nil { + return *new(T), p.err + } + return *p.result, nil +} + +// ResumeToken returns a value representing the poller that can be used to resume +// the LRO at a later time. ResumeTokens are unique per service operation. +// The token's format should be considered opaque and is subject to change. +// Calling this on an LRO in a terminal state will return an error. +func (p *Poller[T]) ResumeToken() (string, error) { + if p.Done() { + return "", errors.New("poller is in a terminal state") + } + tk, err := pollers.NewResumeToken[T](p.op) + if err != nil { + return "", err + } + return tk, err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go new file mode 100644 index 00000000..98e00718 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go @@ -0,0 +1,248 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "mime/multipart" + "os" + "path" + "reflect" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// Base64Encoding is usesd to specify which base-64 encoder/decoder to use when +// encoding/decoding a slice of bytes to/from a string. +type Base64Encoding int + +const ( + // Base64StdFormat uses base64.StdEncoding for encoding and decoding payloads. + Base64StdFormat Base64Encoding = 0 + + // Base64URLFormat uses base64.RawURLEncoding for encoding and decoding payloads. + Base64URLFormat Base64Encoding = 1 +) + +// NewRequest creates a new policy.Request with the specified input. +// The endpoint MUST be properly encoded before calling this function. +func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*policy.Request, error) { + return exported.NewRequest(ctx, httpMethod, endpoint) +} + +// JoinPaths concatenates multiple URL path segments into one path, +// inserting path separation characters as required. JoinPaths will preserve +// query parameters in the root path +func JoinPaths(root string, paths ...string) string { + if len(paths) == 0 { + return root + } + + qps := "" + if strings.Contains(root, "?") { + splitPath := strings.Split(root, "?") + root, qps = splitPath[0], splitPath[1] + } + + p := path.Join(paths...) + // path.Join will remove any trailing slashes. + // if one was provided, preserve it. + if strings.HasSuffix(paths[len(paths)-1], "/") && !strings.HasSuffix(p, "/") { + p += "/" + } + + if qps != "" { + p = p + "?" + qps + } + + if strings.HasSuffix(root, "/") && strings.HasPrefix(p, "/") { + root = root[:len(root)-1] + } else if !strings.HasSuffix(root, "/") && !strings.HasPrefix(p, "/") { + p = "/" + p + } + return root + p +} + +// EncodeByteArray will base-64 encode the byte slice v. 
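// Editorial sketch (not part of the vendored source):
//
//	s := EncodeByteArray([]byte("example payload"), Base64URLFormat) // unpadded base64.RawURLEncoding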
+func EncodeByteArray(v []byte, format Base64Encoding) string { + if format == Base64URLFormat { + return base64.RawURLEncoding.EncodeToString(v) + } + return base64.StdEncoding.EncodeToString(v) +} + +// MarshalAsByteArray will base-64 encode the byte slice v, then calls SetBody. +// The encoded value is treated as a JSON string. +func MarshalAsByteArray(req *policy.Request, v []byte, format Base64Encoding) error { + // send as a JSON string + encode := fmt.Sprintf("\"%s\"", EncodeByteArray(v, format)) + return req.SetBody(exported.NopCloser(strings.NewReader(encode)), shared.ContentTypeAppJSON) +} + +// MarshalAsJSON calls json.Marshal() to get the JSON encoding of v then calls SetBody. +func MarshalAsJSON(req *policy.Request, v interface{}) error { + if omit := os.Getenv("AZURE_SDK_GO_OMIT_READONLY"); omit == "true" { + v = cloneWithoutReadOnlyFields(v) + } + b, err := json.Marshal(v) + if err != nil { + return fmt.Errorf("error marshalling type %T: %s", v, err) + } + return req.SetBody(exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppJSON) +} + +// MarshalAsXML calls xml.Marshal() to get the XML encoding of v then calls SetBody. +func MarshalAsXML(req *policy.Request, v interface{}) error { + b, err := xml.Marshal(v) + if err != nil { + return fmt.Errorf("error marshalling type %T: %s", v, err) + } + // inclue the XML header as some services require it + b = []byte(xml.Header + string(b)) + return req.SetBody(exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppXML) +} + +// SetMultipartFormData writes the specified keys/values as multi-part form +// fields with the specified value. File content must be specified as a ReadSeekCloser. +// All other values are treated as string values. +func SetMultipartFormData(req *policy.Request, formData map[string]interface{}) error { + body := bytes.Buffer{} + writer := multipart.NewWriter(&body) + + writeContent := func(fieldname, filename string, src io.Reader) error { + fd, err := writer.CreateFormFile(fieldname, filename) + if err != nil { + return err + } + // copy the data to the form file + if _, err = io.Copy(fd, src); err != nil { + return err + } + return nil + } + + for k, v := range formData { + if rsc, ok := v.(io.ReadSeekCloser); ok { + if err := writeContent(k, k, rsc); err != nil { + return err + } + continue + } else if rscs, ok := v.([]io.ReadSeekCloser); ok { + for _, rsc := range rscs { + if err := writeContent(k, k, rsc); err != nil { + return err + } + } + continue + } + // ensure the value is in string format + s, ok := v.(string) + if !ok { + s = fmt.Sprintf("%v", v) + } + if err := writer.WriteField(k, s); err != nil { + return err + } + } + if err := writer.Close(); err != nil { + return err + } + return req.SetBody(exported.NopCloser(bytes.NewReader(body.Bytes())), writer.FormDataContentType()) +} + +// SkipBodyDownload will disable automatic downloading of the response body. +func SkipBodyDownload(req *policy.Request) { + req.SetOperationValue(bodyDownloadPolicyOpValues{Skip: true}) +} + +// returns a clone of the object graph pointed to by v, omitting values of all read-only +// fields. if there are no read-only fields in the object graph, no clone is created. +func cloneWithoutReadOnlyFields(v interface{}) interface{} { + val := reflect.Indirect(reflect.ValueOf(v)) + if val.Kind() != reflect.Struct { + // not a struct, skip + return v + } + // first walk the graph to find any R/O fields. + // if there aren't any, skip cloning the graph. 
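	// Editorial note (not part of the vendored source): a field counts as read-only when its struct
	// tag carries the "ro" option, e.g. a generated model field such as
	//	ID *string `json:"id,omitempty" azure:"ro"` // hypothetical example
	// recursiveFindReadOnlyField and azureTagIsReadOnly below implement this check.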
+ if !recursiveFindReadOnlyField(val) { + return v + } + return recursiveCloneWithoutReadOnlyFields(val) +} + +// returns true if any field in the object graph of val contains the `azure:"ro"` tag value +func recursiveFindReadOnlyField(val reflect.Value) bool { + t := val.Type() + // iterate over the fields, looking for the "azure" tag. + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + aztag := field.Tag.Get("azure") + if azureTagIsReadOnly(aztag) { + return true + } else if reflect.Indirect(val.Field(i)).Kind() == reflect.Struct && recursiveFindReadOnlyField(reflect.Indirect(val.Field(i))) { + return true + } + } + return false +} + +// clones the object graph of val. all non-R/O properties are copied to the clone +func recursiveCloneWithoutReadOnlyFields(val reflect.Value) interface{} { + t := val.Type() + clone := reflect.New(t) + // iterate over the fields, looking for the "azure" tag. + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + aztag := field.Tag.Get("azure") + if azureTagIsReadOnly(aztag) { + // omit from payload + continue + } + // clone field will receive the same value as the source field... + value := val.Field(i) + v := reflect.Indirect(value) + if v.IsValid() && v.Type() != reflect.TypeOf(time.Time{}) && v.Kind() == reflect.Struct { + // ...unless the source value is a struct, in which case we recurse to clone that struct. + // (We can't recursively clone time.Time because it contains unexported fields.) + c := recursiveCloneWithoutReadOnlyFields(v) + if field.Anonymous { + // NOTE: this does not handle the case of embedded fields of unexported struct types. + // this should be ok as we don't generate any code like this at present + value = reflect.Indirect(reflect.ValueOf(c)) + } else { + value = reflect.ValueOf(c) + } + } + reflect.Indirect(clone).Field(i).Set(value) + } + return clone.Interface() +} + +// returns true if the "azure" tag contains the option "ro" +func azureTagIsReadOnly(tag string) bool { + if tag == "" { + return false + } + parts := strings.Split(tag, ",") + for _, part := range parts { + if part == "ro" { + return true + } + } + return false +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go new file mode 100644 index 00000000..d1f58e9e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go @@ -0,0 +1,135 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" +) + +// Payload reads and returns the response body or an error. +// On a successful read, the response body is cached. +// Subsequent reads will access the cached value. +func Payload(resp *http.Response) ([]byte, error) { + return exported.Payload(resp, nil) +} + +// HasStatusCode returns true if the Response's status code is one of the specified values. +func HasStatusCode(resp *http.Response, statusCodes ...int) bool { + return exported.HasStatusCode(resp, statusCodes...) +} + +// UnmarshalAsByteArray will base-64 decode the received payload and place the result into the value pointed to by v. 
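+// For example (illustrative), a response body of "aGVsbG8=" decodes to []byte("hello")
+// when format is Base64StdFormat; surrounding JSON quotes, if present, are stripped first.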
+func UnmarshalAsByteArray(resp *http.Response, v *[]byte, format Base64Encoding) error { + p, err := Payload(resp) + if err != nil { + return err + } + return DecodeByteArray(string(p), v, format) +} + +// UnmarshalAsJSON calls json.Unmarshal() to unmarshal the received payload into the value pointed to by v. +func UnmarshalAsJSON(resp *http.Response, v interface{}) error { + payload, err := Payload(resp) + if err != nil { + return err + } + // TODO: verify early exit is correct + if len(payload) == 0 { + return nil + } + err = removeBOM(resp) + if err != nil { + return err + } + err = json.Unmarshal(payload, v) + if err != nil { + err = fmt.Errorf("unmarshalling type %T: %s", v, err) + } + return err +} + +// UnmarshalAsXML calls xml.Unmarshal() to unmarshal the received payload into the value pointed to by v. +func UnmarshalAsXML(resp *http.Response, v interface{}) error { + payload, err := Payload(resp) + if err != nil { + return err + } + // TODO: verify early exit is correct + if len(payload) == 0 { + return nil + } + err = removeBOM(resp) + if err != nil { + return err + } + err = xml.Unmarshal(payload, v) + if err != nil { + err = fmt.Errorf("unmarshalling type %T: %s", v, err) + } + return err +} + +// Drain reads the response body to completion then closes it. The bytes read are discarded. +func Drain(resp *http.Response) { + if resp != nil && resp.Body != nil { + _, _ = io.Copy(io.Discard, resp.Body) + resp.Body.Close() + } +} + +// removeBOM removes any byte-order mark prefix from the payload if present. +func removeBOM(resp *http.Response) error { + _, err := exported.Payload(resp, &exported.PayloadOptions{ + BytesModifier: func(b []byte) []byte { + // UTF8 + return bytes.TrimPrefix(b, []byte("\xef\xbb\xbf")) + }, + }) + if err != nil { + return err + } + return nil +} + +// DecodeByteArray will base-64 decode the provided string into v. +func DecodeByteArray(s string, v *[]byte, format Base64Encoding) error { + if len(s) == 0 { + return nil + } + payload := string(s) + if payload[0] == '"' { + // remove surrounding quotes + payload = payload[1 : len(payload)-1] + } + switch format { + case Base64StdFormat: + decoded, err := base64.StdEncoding.DecodeString(payload) + if err == nil { + *v = decoded + return nil + } + return err + case Base64URLFormat: + // use raw encoding as URL format should not contain any '=' characters + decoded, err := base64.RawURLEncoding.DecodeString(payload) + if err == nil { + *v = decoded + return nil + } + return err + default: + return fmt.Errorf("unrecognized byte array format: %d", format) + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go new file mode 100644 index 00000000..869bed51 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go @@ -0,0 +1,37 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package runtime + +import ( + "crypto/tls" + "net" + "net/http" + "time" +) + +var defaultHTTPClient *http.Client + +func init() { + defaultTransport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + ForceAttemptHTTP2: true, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + TLSClientConfig: &tls.Config{ + MinVersion: tls.VersionTLS12, + }, + } + defaultHTTPClient = &http.Client{ + Transport: defaultTransport, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go new file mode 100644 index 00000000..cadaef3d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go @@ -0,0 +1,9 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +// Package streaming contains helpers for streaming IO operations and progress reporting. +package streaming diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go new file mode 100644 index 00000000..fbcd4831 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go @@ -0,0 +1,75 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package streaming + +import ( + "io" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" +) + +type progress struct { + rc io.ReadCloser + rsc io.ReadSeekCloser + pr func(bytesTransferred int64) + offset int64 +} + +// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker. +// In addition to adding a Close method to an io.ReadSeeker, this can also be used to wrap an +// io.ReadSeekCloser with a no-op Close method to allow explicit control of when the io.ReedSeekCloser +// has its underlying stream closed. +func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser { + return exported.NopCloser(rs) +} + +// NewRequestProgress adds progress reporting to an HTTP request's body stream. +func NewRequestProgress(body io.ReadSeekCloser, pr func(bytesTransferred int64)) io.ReadSeekCloser { + return &progress{ + rc: body, + rsc: body, + pr: pr, + offset: 0, + } +} + +// NewResponseProgress adds progress reporting to an HTTP response's body stream. +func NewResponseProgress(body io.ReadCloser, pr func(bytesTransferred int64)) io.ReadCloser { + return &progress{ + rc: body, + rsc: nil, + pr: pr, + offset: 0, + } +} + +// Read reads a block of data from an inner stream and reports progress +func (p *progress) Read(b []byte) (n int, err error) { + n, err = p.rc.Read(b) + if err != nil && err != io.EOF { + return + } + p.offset += int64(n) + // Invokes the user's callback method to report progress + p.pr(p.offset) + return +} + +// Seek only expects a zero or from beginning. 
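+// That is, callers are expected to pass an offset of zero with whence io.SeekStart.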
+func (p *progress) Seek(offset int64, whence int) (int64, error) { + // This should only ever be called with offset = 0 and whence = io.SeekStart + n, err := p.rsc.Seek(offset, whence) + if err == nil { + p.offset = int64(n) + } + return n, err +} + +// requestBodyProgress supports Close but the underlying stream may not; if it does, Close will close it. +func (p *progress) Close() error { + return p.rc.Close() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/doc.go new file mode 100644 index 00000000..faa98c9d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/doc.go @@ -0,0 +1,9 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +// Package to contains various type-conversion helper functions. +package to diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/to.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/to.go new file mode 100644 index 00000000..e0e4817b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/to.go @@ -0,0 +1,21 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package to + +// Ptr returns a pointer to the provided value. +func Ptr[T any](v T) *T { + return &v +} + +// SliceOfPtrs returns a slice of *T from the specified values. +func SliceOfPtrs[T any](vv ...T) []*T { + slc := make([]*T, len(vv)) + for i := range vv { + slc[i] = Ptr(vv[i]) + } + return slc +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/constants.go new file mode 100644 index 00000000..80282d4a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/constants.go @@ -0,0 +1,41 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package tracing + +// SpanKind represents the role of a Span inside a Trace. Often, this defines how a Span will be processed and visualized by various backends. +type SpanKind int + +const ( + // SpanKindInternal indicates the span represents an internal operation within an application. + SpanKindInternal SpanKind = 1 + + // SpanKindServer indicates the span covers server-side handling of a request. + SpanKindServer SpanKind = 2 + + // SpanKindClient indicates the span describes a request to a remote service. + SpanKindClient SpanKind = 3 + + // SpanKindProducer indicates the span was created by a messaging producer. + SpanKindProducer SpanKind = 4 + + // SpanKindConsumer indicates the span was created by a messaging consumer. + SpanKindConsumer SpanKind = 5 +) + +// SpanStatus represents the status of a span. +type SpanStatus int + +const ( + // SpanStatusUnset is the default status code. + SpanStatusUnset SpanStatus = 0 + + // SpanStatusError indicates the operation contains an error. + SpanStatusError SpanStatus = 1 + + // SpanStatusOK indicates the operation completed successfully. 
+ SpanStatusOK SpanStatus = 2 +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go new file mode 100644 index 00000000..75f757ce --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go @@ -0,0 +1,168 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// Package tracing contains the definitions needed to support distributed tracing. +package tracing + +import ( + "context" +) + +// ProviderOptions contains the optional values when creating a Provider. +type ProviderOptions struct { + // for future expansion +} + +// NewProvider creates a new Provider with the specified values. +// - newTracerFn is the underlying implementation for creating Tracer instances +// - options contains optional values; pass nil to accept the default value +func NewProvider(newTracerFn func(name, version string) Tracer, options *ProviderOptions) Provider { + return Provider{ + newTracerFn: newTracerFn, + } +} + +// Provider is the factory that creates Tracer instances. +// It defaults to a no-op provider. +type Provider struct { + newTracerFn func(name, version string) Tracer +} + +// NewTracer creates a new Tracer for the specified name and version. +// - name - the name of the tracer object, typically the fully qualified name of the service client +// - version - the version of the module in which the service client resides +func (p Provider) NewTracer(name, version string) (tracer Tracer) { + if p.newTracerFn != nil { + tracer = p.newTracerFn(name, version) + } + return +} + +///////////////////////////////////////////////////////////////////////////////////////////////////////////// + +// TracerOptions contains the optional values when creating a Tracer. +type TracerOptions struct { + // for future expansion +} + +// NewTracer creates a Tracer with the specified values. +// - newSpanFn is the underlying implementation for creating Span instances +// - options contains optional values; pass nil to accept the default value +func NewTracer(newSpanFn func(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span), options *TracerOptions) Tracer { + return Tracer{ + newSpanFn: newSpanFn, + } +} + +// Tracer is the factory that creates Span instances. +type Tracer struct { + newSpanFn func(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span) +} + +// Start creates a new span and a context.Context that contains it. +// - ctx is the parent context for this span. If it contains a Span, the newly created span will be a child of that span, else it will be a root span +// - spanName identifies the span within a trace, it's typically the fully qualified API name +// - options contains optional values for the span, pass nil to accept any defaults +func (t Tracer) Start(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span) { + if t.newSpanFn != nil { + return t.newSpanFn(ctx, spanName, options) + } + return ctx, Span{} +} + +// SpanOptions contains optional settings for creating a span. +type SpanOptions struct { + // Kind indicates the kind of Span. + Kind SpanKind + + // Attributes contains key-value pairs of attributes for the span. 
+ Attributes []Attribute +} + +///////////////////////////////////////////////////////////////////////////////////////////////////////////// + +// SpanImpl abstracts the underlying implementation for Span, +// allowing it to work with various tracing implementations. +// Any zero-values will have their default, no-op behavior. +type SpanImpl struct { + // End contains the implementation for the Span.End method. + End func() + + // SetAttributes contains the implementation for the Span.SetAttributes method. + SetAttributes func(...Attribute) + + // AddEvent contains the implementation for the Span.AddEvent method. + AddEvent func(string, ...Attribute) + + // AddError contains the implementation for the Span.AddError method. + AddError func(err error) + + // SetStatus contains the implementation for the Span.SetStatus method. + SetStatus func(SpanStatus, string) +} + +// NewSpan creates a Span with the specified implementation. +func NewSpan(impl SpanImpl) Span { + return Span{ + impl: impl, + } +} + +// Span is a single unit of a trace. A trace can contain multiple spans. +// A zero-value Span provides a no-op implementation. +type Span struct { + impl SpanImpl +} + +// End terminates the span and MUST be called before the span leaves scope. +// Any further updates to the span will be ignored after End is called. +func (s Span) End() { + if s.impl.End != nil { + s.impl.End() + } +} + +// SetAttributes sets the specified attributes on the Span. +// Any existing attributes with the same keys will have their values overwritten. +func (s Span) SetAttributes(attrs ...Attribute) { + if s.impl.SetAttributes != nil { + s.impl.SetAttributes(attrs...) + } +} + +// AddEvent adds a named event with an optional set of attributes to the span. +func (s Span) AddEvent(name string, attrs ...Attribute) { + if s.impl.AddEvent != nil { + s.impl.AddEvent(name, attrs...) + } +} + +// AddError adds the specified error event to the span. +func (s Span) AddError(err error) { + if s.impl.AddError != nil { + s.impl.AddError(err) + } +} + +// SetStatus sets the status on the span along with a description. +func (s Span) SetStatus(code SpanStatus, desc string) { + if s.impl.SetStatus != nil { + s.impl.SetStatus(code, desc) + } +} + +///////////////////////////////////////////////////////////////////////////////////////////////////////////// + +// Attribute is a key-value pair. +type Attribute struct { + // Key is the name of the attribute. + Key string + + // Value is the attribute's value. + // Types that are natively supported include int64, float64, int, bool, string. + // Any other type will be formatted per rules of fmt.Sprintf("%v"). 
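+	// For example (illustrative): Attribute{Key: "http.status_code", Value: 200}.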
+ Value any +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md new file mode 100644 index 00000000..cc8034cf --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md @@ -0,0 +1,409 @@ +# Release History + +## 1.3.0 (2023-05-09) + +### Breaking Changes +> These changes affect only code written against a beta version such as v1.3.0-beta.5 +* Renamed `NewOnBehalfOfCredentialFromCertificate` to `NewOnBehalfOfCredentialWithCertificate` +* Renamed `NewOnBehalfOfCredentialFromSecret` to `NewOnBehalfOfCredentialWithSecret` + +### Other Changes +* Upgraded to MSAL v1.0.0 + +## 1.3.0-beta.5 (2023-04-11) + +### Breaking Changes +> These changes affect only code written against a beta version such as v1.3.0-beta.4 +* Moved `NewWorkloadIdentityCredential()` parameters into `WorkloadIdentityCredentialOptions`. + The constructor now reads default configuration from environment variables set by the Azure + workload identity webhook by default. + ([#20478](https://github.com/Azure/azure-sdk-for-go/pull/20478)) +* Removed CAE support. It will return in v1.4.0-beta.1 + ([#20479](https://github.com/Azure/azure-sdk-for-go/pull/20479)) + +### Bugs Fixed +* Fixed an issue in `DefaultAzureCredential` that could cause the managed identity endpoint check to fail in rare circumstances. + +## 1.3.0-beta.4 (2023-03-08) + +### Features Added +* Added `WorkloadIdentityCredentialOptions.AdditionallyAllowedTenants` and `.DisableInstanceDiscovery` + +### Bugs Fixed +* Credentials now synchronize within `GetToken()` so a single instance can be shared among goroutines + ([#20044](https://github.com/Azure/azure-sdk-for-go/issues/20044)) + +### Other Changes +* Upgraded dependencies + +## 1.2.2 (2023-03-07) + +### Other Changes +* Upgraded dependencies + +## 1.3.0-beta.3 (2023-02-07) + +### Features Added +* By default, credentials set client capability "CP1" to enable support for + [Continuous Access Evaluation (CAE)](https://docs.microsoft.com/azure/active-directory/develop/app-resilience-continuous-access-evaluation). + This indicates to Azure Active Directory that your application can handle CAE claims challenges. + You can disable this behavior by setting the environment variable "AZURE_IDENTITY_DISABLE_CP1" to "true". +* `InteractiveBrowserCredentialOptions.LoginHint` enables pre-populating the login + prompt with a username ([#15599](https://github.com/Azure/azure-sdk-for-go/pull/15599)) +* Service principal and user credentials support ADFS authentication on Azure Stack. + Specify "adfs" as the credential's tenant. +* Applications running in private or disconnected clouds can prevent credentials from + requesting Azure AD instance metadata by setting the `DisableInstanceDiscovery` + field on credential options. +* Many credentials can now be configured to authenticate in multiple tenants. The + options types for these credentials have an `AdditionallyAllowedTenants` field + that specifies additional tenants in which the credential may authenticate. 
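+
+  For example, the last two items above can be combined as follows (a sketch only; these
+  option fields are the ones named above, and the exact options struct varies by credential type):
+  ```go
+  opts := &azidentity.ClientSecretCredentialOptions{
+      AdditionallyAllowedTenants: []string{"<second-tenant-ID>"},
+      DisableInstanceDiscovery:   true, // for private or disconnected clouds
+  }
+  cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, opts)
+  ```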
+ +## 1.3.0-beta.2 (2023-01-10) + +### Features Added +* Added `OnBehalfOfCredential` to support the on-behalf-of flow + ([#16642](https://github.com/Azure/azure-sdk-for-go/issues/16642)) + +### Bugs Fixed +* `AzureCLICredential` reports token expiration in local time (should be UTC) + +### Other Changes +* `AzureCLICredential` imposes its default timeout only when the `Context` + passed to `GetToken()` has no deadline +* Added `NewCredentialUnavailableError()`. This function constructs an error indicating + a credential can't authenticate and an encompassing `ChainedTokenCredential` should + try its next credential, if any. + +## 1.3.0-beta.1 (2022-12-13) + +### Features Added +* `WorkloadIdentityCredential` and `DefaultAzureCredential` support + Workload Identity Federation on Kubernetes. `DefaultAzureCredential` + support requires environment variable configuration as set by the + Workload Identity webhook. + ([#15615](https://github.com/Azure/azure-sdk-for-go/issues/15615)) + +## 1.2.0 (2022-11-08) + +### Other Changes +* This version includes all fixes and features from 1.2.0-beta.* + +## 1.2.0-beta.3 (2022-10-11) + +### Features Added +* `ManagedIdentityCredential` caches tokens in memory + +### Bugs Fixed +* `ClientCertificateCredential` sends only the leaf cert for SNI authentication + +## 1.2.0-beta.2 (2022-08-10) + +### Features Added +* Added `ClientAssertionCredential` to enable applications to authenticate + with custom client assertions + +### Other Changes +* Updated AuthenticationFailedError with links to TROUBLESHOOTING.md for relevant errors +* Upgraded `microsoft-authentication-library-for-go` requirement to v0.6.0 + +## 1.2.0-beta.1 (2022-06-07) + +### Features Added +* `EnvironmentCredential` reads certificate passwords from `AZURE_CLIENT_CERTIFICATE_PASSWORD` + ([#17099](https://github.com/Azure/azure-sdk-for-go/pull/17099)) + +## 1.1.0 (2022-06-07) + +### Features Added +* `ClientCertificateCredential` and `ClientSecretCredential` support ESTS-R. First-party + applications can set environment variable `AZURE_REGIONAL_AUTHORITY_NAME` with a + region name. + ([#15605](https://github.com/Azure/azure-sdk-for-go/issues/15605)) + +## 1.0.1 (2022-06-07) + +### Other Changes +* Upgrade `microsoft-authentication-library-for-go` requirement to v0.5.1 + ([#18176](https://github.com/Azure/azure-sdk-for-go/issues/18176)) + +## 1.0.0 (2022-05-12) + +### Features Added +* `DefaultAzureCredential` reads environment variable `AZURE_CLIENT_ID` for the + client ID of a user-assigned managed identity + ([#17293](https://github.com/Azure/azure-sdk-for-go/pull/17293)) + +### Breaking Changes +* Removed `AuthorizationCodeCredential`. Use `InteractiveBrowserCredential` instead + to authenticate a user with the authorization code flow. +* Instances of `AuthenticationFailedError` are now returned by pointer. +* `GetToken()` returns `azcore.AccessToken` by value + +### Bugs Fixed +* `AzureCLICredential` panics after receiving an unexpected error type + ([#17490](https://github.com/Azure/azure-sdk-for-go/issues/17490)) + +### Other Changes +* `GetToken()` returns an error when the caller specifies no scope +* Updated to the latest versions of `golang.org/x/crypto`, `azcore` and `internal` + +## 0.14.0 (2022-04-05) + +### Breaking Changes +* This module now requires Go 1.18 +* Removed `AuthorityHost`. 
Credentials are now configured for sovereign or private
+  clouds with the API in `azcore/cloud`, for example:
+  ```go
+  // before
+  opts := azidentity.ClientSecretCredentialOptions{AuthorityHost: azidentity.AzureGovernment}
+  cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, &opts)
+
+  // after
+  import "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
+
+  opts := azidentity.ClientSecretCredentialOptions{}
+  opts.Cloud = cloud.AzureGovernment
+  cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, &opts)
+  ```
+
+## 0.13.2 (2022-03-08)
+
+### Bugs Fixed
+* Prevented a data race in `DefaultAzureCredential` and `ChainedTokenCredential`
+  ([#17144](https://github.com/Azure/azure-sdk-for-go/issues/17144))
+
+### Other Changes
+* Upgraded App Service managed identity version from 2017-09-01 to 2019-08-01
+  ([#17086](https://github.com/Azure/azure-sdk-for-go/pull/17086))
+
+## 0.13.1 (2022-02-08)
+
+### Features Added
+* `EnvironmentCredential` supports certificate SNI authentication when
+  `AZURE_CLIENT_SEND_CERTIFICATE_CHAIN` is "true".
+  ([#16851](https://github.com/Azure/azure-sdk-for-go/pull/16851))
+
+### Bugs Fixed
+* `ManagedIdentityCredential.GetToken()` now returns an error when configured for
+  a user assigned identity in Azure Cloud Shell (which doesn't support such identities)
+  ([#16946](https://github.com/Azure/azure-sdk-for-go/pull/16946))
+
+### Other Changes
+* `NewDefaultAzureCredential()` logs non-fatal errors. These errors are also included in the
+  error returned by `DefaultAzureCredential.GetToken()` when it's unable to acquire a token
+  from any source. ([#15923](https://github.com/Azure/azure-sdk-for-go/issues/15923))
+
+## 0.13.0 (2022-01-11)
+
+### Breaking Changes
+* Replaced `AuthenticationFailedError.RawResponse()` with a field having the same name
+* Unexported `CredentialUnavailableError`
+* Instances of `ChainedTokenCredential` will now skip looping through the list of source credentials and re-use the first successful credential on subsequent calls to `GetToken`.
+  * If `ChainedTokenCredentialOptions.RetrySources` is true, `ChainedTokenCredential` will continue to try all of the originally provided credentials each time the `GetToken` method is called.
+  * `ChainedTokenCredential.successfulCredential` will contain a reference to the last successful credential.
+  * `DefaultAzureCredential` will also re-use the first successful credential on subsequent calls to `GetToken`.
+  * `DefaultAzureCredential.chain.successfulCredential` will also contain a reference to the last successful credential.
+
+### Other Changes
+* `ManagedIdentityCredential` no longer probes IMDS before requesting a token
+  from it. Also, an error response from IMDS no longer disables a credential
+  instance. Following an error, a credential instance will continue to send
+  requests to IMDS as necessary.
+* Adopted MSAL for user and service principal authentication
+* Updated `azcore` requirement to 0.21.0
+
+## 0.12.0 (2021-11-02)
+### Breaking Changes
+* Raised the minimum Go version to 1.16
+* Removed `NewAuthenticationPolicy()` from credentials. Clients should instead use azcore's
+  `runtime.NewBearerTokenPolicy()` to construct a bearer token authorization policy (see the sketch after this list).
+* The `AuthorityHost` field in credential options structs is now a custom type,
+  `AuthorityHost`, with underlying type `string`
+* `NewChainedTokenCredential` has a new signature to accommodate a placeholder
+  options struct:
+  ```go
+  // before
+  cred, err := NewChainedTokenCredential(credA, credB)
+
+  // after
+  cred, err := NewChainedTokenCredential([]azcore.TokenCredential{credA, credB}, nil)
+  ```
+* Removed `ExcludeAzureCLICredential`, `ExcludeEnvironmentCredential`, and `ExcludeMSICredential`
+  from `DefaultAzureCredentialOptions`
+* `NewClientCertificateCredential` requires a `[]*x509.Certificate` and `crypto.PrivateKey` instead of
+  a path to a certificate file. Added `ParseCertificates` to simplify getting these in common cases:
+  ```go
+  // before
+  cred, err := NewClientCertificateCredential("tenant", "client-id", "/cert.pem", nil)
+
+  // after
+  certData, err := os.ReadFile("/cert.pem")
+  certs, key, err := ParseCertificates(certData, password)
+  cred, err := NewClientCertificateCredential(tenantID, clientID, certs, key, nil)
+  ```
+* Removed `InteractiveBrowserCredentialOptions.ClientSecret` and `.Port`
+* Removed `AADAuthenticationFailedError`
+* Removed `id` parameter of `NewManagedIdentityCredential()`. User assigned identities are now
+  specified by `ManagedIdentityCredentialOptions.ID`:
+  ```go
+  // before
+  cred, err := NewManagedIdentityCredential("client-id", nil)
+  // or, for a resource ID
+  opts := &ManagedIdentityCredentialOptions{ID: ResourceID}
+  cred, err := NewManagedIdentityCredential("/subscriptions/...", opts)
+
+  // after
+  clientID := ClientID("7cf7db0d-...")
+  opts := &ManagedIdentityCredentialOptions{ID: clientID}
+  // or, for a resource ID
+  resID := ResourceID("/subscriptions/...")
+  opts := &ManagedIdentityCredentialOptions{ID: resID}
+  cred, err := NewManagedIdentityCredential(opts)
+  ```
+* `DeviceCodeCredentialOptions.UserPrompt` has a new type: `func(context.Context, DeviceCodeMessage) error`
+* Credential options structs now embed `azcore.ClientOptions`. In addition to changing literal initialization
+  syntax, this change renames `HTTPClient` fields to `Transport`.
+* Renamed `LogCredential` to `EventCredential`
+* `AzureCLICredential` no longer reads the environment variable `AZURE_CLI_PATH`
+* `NewManagedIdentityCredential` no longer reads environment variables `AZURE_CLIENT_ID` and
+  `AZURE_RESOURCE_ID`. Use `ManagedIdentityCredentialOptions.ID` instead.
+* Unexported `AuthenticationFailedError` and `CredentialUnavailableError` structs. In their place are two
+  interfaces having the same names.
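+
+A minimal sketch of replacing `NewAuthenticationPolicy()` with `runtime.NewBearerTokenPolicy()` (it assumes `cred` is an `azcore.TokenCredential`, uses ARM's `.default` scope as an example audience, and relies on the `azcore/runtime` and `azcore/policy` packages; real clients wire the policy into their pipelines per module):
+```go
+authPolicy := runtime.NewBearerTokenPolicy(cred, []string{"https://management.azure.com/.default"}, nil)
+pl := runtime.NewPipeline("example-module", "v0.1.0", runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}, nil)
+```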
+ +### Bugs Fixed +* `AzureCLICredential.GetToken` no longer mutates its `opts.Scopes` + +### Features Added +* Added connection configuration options to `DefaultAzureCredentialOptions` +* `AuthenticationFailedError.RawResponse()` returns the HTTP response motivating the error, + if available + +### Other Changes +* `NewDefaultAzureCredential()` returns `*DefaultAzureCredential` instead of `*ChainedTokenCredential` +* Added `TenantID` field to `DefaultAzureCredentialOptions` and `AzureCLICredentialOptions` + +## 0.11.0 (2021-09-08) +### Breaking Changes +* Unexported `AzureCLICredentialOptions.TokenProvider` and its type, + `AzureCLITokenProvider` + +### Bug Fixes +* `ManagedIdentityCredential.GetToken` returns `CredentialUnavailableError` + when IMDS has no assigned identity, signaling `DefaultAzureCredential` to + try other credentials + + +## 0.10.0 (2021-08-30) +### Breaking Changes +* Update based on `azcore` refactor [#15383](https://github.com/Azure/azure-sdk-for-go/pull/15383) + +## 0.9.3 (2021-08-20) + +### Bugs Fixed +* `ManagedIdentityCredential.GetToken` no longer mutates its `opts.Scopes` + +### Other Changes +* Bumps version of `azcore` to `v0.18.1` + + +## 0.9.2 (2021-07-23) +### Features Added +* Adding support for Service Fabric environment in `ManagedIdentityCredential` +* Adding an option for using a resource ID instead of client ID in `ManagedIdentityCredential` + + +## 0.9.1 (2021-05-24) +### Features Added +* Add LICENSE.txt and bump version information + + +## 0.9.0 (2021-05-21) +### Features Added +* Add support for authenticating in Azure Stack environments +* Enable user assigned identities for the IMDS scenario in `ManagedIdentityCredential` +* Add scope to resource conversion in `GetToken()` on `ManagedIdentityCredential` + + +## 0.8.0 (2021-01-20) +### Features Added +* Updating documentation + + +## 0.7.1 (2021-01-04) +### Features Added +* Adding port option to `InteractiveBrowserCredential` + + +## 0.7.0 (2020-12-11) +### Features Added +* Add `redirectURI` parameter back to authentication code flow + + +## 0.6.1 (2020-12-09) +### Features Added +* Updating query parameter in `ManagedIdentityCredential` and updating datetime string for parsing managed identity access tokens. + + +## 0.6.0 (2020-11-16) +### Features Added +* Remove `RedirectURL` parameter from auth code flow to align with the MSAL implementation which relies on the native client redirect URL. + + +## 0.5.0 (2020-10-30) +### Features Added +* Flattening credential options + + +## 0.4.3 (2020-10-21) +### Features Added +* Adding Azure Arc support in `ManagedIdentityCredential` + + +## 0.4.2 (2020-10-16) +### Features Added +* Typo fixes + + +## 0.4.1 (2020-10-16) +### Features Added +* Ensure authority hosts are only HTTPs + + +## 0.4.0 (2020-10-16) +### Features Added +* Adding options structs for credentials + + +## 0.3.0 (2020-10-09) +### Features Added +* Update `DeviceCodeCredential` callback + + +## 0.2.2 (2020-10-09) +### Features Added +* Add `AuthorizationCodeCredential` + + +## 0.2.1 (2020-10-06) +### Features Added +* Add `InteractiveBrowserCredential` + + +## 0.2.0 (2020-09-11) +### Features Added +* Refactor `azidentity` on top of `azcore` refactor +* Updated policies to conform to `policy.Policy` interface changes. +* Updated non-retriable errors to conform to `azcore.NonRetriableError`. +* Fixed calls to `Request.SetBody()` to include content type. +* Switched endpoints to string types and removed extra parsing code. 
+ + +## 0.1.1 (2020-09-02) +### Features Added +* Add `AzureCLICredential` to `DefaultAzureCredential` chain + + +## 0.1.0 (2020-07-23) +### Features Added +* Initial Release. Azure Identity library that provides Azure Active Directory token authentication support for the SDK. diff --git a/vendor/github.com/mitchellh/copystructure/LICENSE b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/LICENSE.txt similarity index 86% rename from vendor/github.com/mitchellh/copystructure/LICENSE rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/LICENSE.txt index 22985159..48ea6616 100644 --- a/vendor/github.com/mitchellh/copystructure/LICENSE +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/LICENSE.txt @@ -1,6 +1,6 @@ -The MIT License (MIT) +MIT License -Copyright (c) 2014 Mitchell Hashimoto +Copyright (c) Microsoft Corporation. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -9,13 +9,13 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md new file mode 100644 index 00000000..4ac53eb7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md @@ -0,0 +1,307 @@ +# Migrating from autorest/adal to azidentity + +`azidentity` provides Azure Active Directory (Azure AD) authentication for the newest Azure SDK modules (`github.com/azure-sdk-for-go/sdk/...`). Older Azure SDK packages (`github.com/azure-sdk-for-go/services/...`) use types from `github.com/go-autorest/autorest/adal` instead. + +This guide shows common authentication code using `autorest/adal` and its equivalent using `azidentity`. 
+ +## Table of contents + +- [Acquire a token](#acquire-a-token) +- [Client certificate authentication](#client-certificate-authentication) +- [Client secret authentication](#client-secret-authentication) +- [Configuration](#configuration) +- [Device code authentication](#device-code-authentication) +- [Managed identity](#managed-identity) +- [Use azidentity credentials with older packages](#use-azidentity-credentials-with-older-packages) + +## Configuration + +### `autorest/adal` + +Token providers require a token audience (resource identifier) and an instance of `adal.OAuthConfig`, which requires an Azure AD endpoint and tenant: + +```go +import "github.com/Azure/go-autorest/autorest/adal" + +oauthCfg, err := adal.NewOAuthConfig("https://login.chinacloudapi.cn", tenantID) +handle(err) + +spt, err := adal.NewServicePrincipalTokenWithSecret( + *oauthCfg, clientID, "https://management.chinacloudapi.cn/", &adal.ServicePrincipalTokenSecret{ClientSecret: secret}, +) +``` + +### `azidentity` + +A credential instance can acquire tokens for any audience. The audience for each token is determined by the client requesting it. Credentials require endpoint configuration only for sovereign or private clouds. The `azcore/cloud` package has predefined configuration for sovereign clouds such as Azure China: + +```go +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" +) + +clientOpts := azcore.ClientOptions{Cloud: cloud.AzureChina} + +cred, err := azidentity.NewClientSecretCredential( + tenantID, clientID, secret, &azidentity.ClientSecretCredentialOptions{ClientOptions: clientOpts}, +) +handle(err) +``` + +## Client secret authentication + +### `autorest/adal` + +```go +import ( + "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" +) + +oauthCfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", tenantID) +handle(err) +spt, err := adal.NewServicePrincipalTokenWithSecret( + *oauthCfg, clientID, "https://management.azure.com/", &adal.ServicePrincipalTokenSecret{ClientSecret: secret}, +) +handle(err) + +client := subscriptions.NewClient() +client.Authorizer = autorest.NewBearerAuthorizer(spt) +``` + +### `azidentity` + +```go +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions" +) + +cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, nil) +handle(err) + +client, err := armsubscriptions.NewClient(cred, nil) +handle(err) +``` + +## Client certificate authentication + +### `autorest/adal` + +```go +import ( + "os" + + "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" +) +certData, err := os.ReadFile("./example.pfx") +handle(err) + +certificate, rsaPrivateKey, err := decodePkcs12(certData, "") +handle(err) + +oauthCfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", tenantID) +handle(err) + +spt, err := adal.NewServicePrincipalTokenFromCertificate( + *oauthConfig, clientID, certificate, rsaPrivateKey, "https://management.azure.com/", +) + +client := subscriptions.NewClient() +client.Authorizer = autorest.NewBearerAuthorizer(spt) +``` + +### `azidentity` + +```go +import ( + "os" + + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + 
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions" +) + +certData, err := os.ReadFile("./example.pfx") +handle(err) + +certs, key, err := azidentity.ParseCertificates(certData, nil) +handle(err) + +cred, err = azidentity.NewClientCertificateCredential(tenantID, clientID, certs, key, nil) +handle(err) + +client, err := armsubscriptions.NewClient(cred, nil) +handle(err) +``` + +## Managed identity + +### `autorest/adal` + +```go +import ( + "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" +) + +spt, err := adal.NewServicePrincipalTokenFromManagedIdentity("https://management.azure.com/", nil) +handle(err) + +client := subscriptions.NewClient() +client.Authorizer = autorest.NewBearerAuthorizer(spt) +``` + +### `azidentity` + +```go +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions" +) + +cred, err := azidentity.NewManagedIdentityCredential(nil) +handle(err) + +client, err := armsubscriptions.NewClient(cred, nil) +handle(err) +``` + +### User-assigned identities + +`autorest/adal`: + +```go +import "github.com/Azure/go-autorest/autorest/adal" + +opts := &adal.ManagedIdentityOptions{ClientID: "..."} +spt, err := adal.NewServicePrincipalTokenFromManagedIdentity("https://management.azure.com/") +handle(err) +``` + +`azidentity`: + +```go +import "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + +opts := azidentity.ManagedIdentityCredentialOptions{ID: azidentity.ClientID("...")} +cred, err := azidentity.NewManagedIdentityCredential(&opts) +handle(err) +``` + +## Device code authentication + +### `autorest/adal` + +```go +import ( + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" +) + +oauthClient := &http.Client{} +oauthCfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", tenantID) +handle(err) +resource := "https://management.azure.com/" +deviceCode, err := adal.InitiateDeviceAuth(oauthClient, *oauthCfg, clientID, resource) +handle(err) + +// display instructions, wait for the user to authenticate +fmt.Println(*deviceCode.Message) +token, err := adal.WaitForUserCompletion(oauthClient, deviceCode) +handle(err) + +spt, err := adal.NewServicePrincipalTokenFromManualToken(*oauthCfg, clientID, resource, *token) +handle(err) + +client := subscriptions.NewClient() +client.Authorizer = autorest.NewBearerAuthorizer(spt) +``` + +### `azidentity` + +```go +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions" +) + +cred, err := azidentity.NewDeviceCodeCredential(nil) +handle(err) + +client, err := armsubscriptions.NewSubscriptionsClient(cred, nil) +handle(err) +``` + +`azidentity.DeviceCodeCredential` will guide a user through authentication, printing instructions to the console by default. The user prompt is customizable. For more information, see the [package documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DeviceCodeCredential). 
+ +## Acquire a token + +### `autorest/adal` + +```go +import "github.com/Azure/go-autorest/autorest/adal" + +oauthCfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", tenantID) +handle(err) + +spt, err := adal.NewServicePrincipalTokenWithSecret( + *oauthCfg, clientID, "https://vault.azure.net", &adal.ServicePrincipalTokenSecret{ClientSecret: secret}, +) + +err = spt.Refresh() +if err == nil { + token := spt.Token +} +``` + +### `azidentity` + +In ordinary usage, application code doesn't need to request tokens from credentials directly. Azure SDK clients handle token acquisition and refreshing internally. However, applications may call `GetToken()` to do so. All credential types have this method. + +```go +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" +) + +cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, nil) +handle(err) + +tk, err := cred.GetToken( + context.TODO(), policy.TokenRequestOptions{Scopes: []string{"https://vault.azure.net/.default"}}, +) +if err == nil { + token := tk.Token +} +``` + +Note that `azidentity` credentials use the Azure AD v2.0 endpoint, which requires OAuth 2 scopes instead of the resource identifiers `autorest/adal` expects. For more information, see [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/v2-permissions-and-consent). + +## Use azidentity credentials with older packages + +The [azidext module](https://pkg.go.dev/github.com/jongio/azidext/go/azidext) provides an adapter for `azidentity` credential types. The adapter enables using the credential types with older Azure SDK clients. For example: + +```go +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions" + "github.com/jongio/azidext/go/azidext" +) + +cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, nil) +handle(err) + +client := subscriptions.NewClient() +client.Authorizer = azidext.NewTokenCredentialAdapter(cred, []string{"https://management.azure.com//.default"}) +``` + +![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-go%2Fsdk%2Fazidentity%2FMIGRATION.png) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md new file mode 100644 index 00000000..da0baa9a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md @@ -0,0 +1,243 @@ +# Azure Identity Client Module for Go + +The Azure Identity module provides Azure Active Directory (Azure AD) token authentication support across the Azure SDK. It includes a set of `TokenCredential` implementations, which can be used with Azure SDK clients supporting token authentication. + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/azidentity)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity) +| [Azure Active Directory documentation](https://docs.microsoft.com/azure/active-directory/) +| [Source code](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/azidentity) + +# Getting started + +## Install the module + +This project uses [Go modules](https://github.com/golang/go/wiki/Modules) for versioning and dependency management. 
+ +Install the Azure Identity module: + +```sh +go get -u github.com/Azure/azure-sdk-for-go/sdk/azidentity +``` + +## Prerequisites + +- an [Azure subscription](https://azure.microsoft.com/free/) +- Go 1.18 + +### Authenticating during local development + +When debugging and executing code locally, developers typically use their own accounts to authenticate calls to Azure services. The `azidentity` module supports authenticating through developer tools to simplify local development. + +#### Authenticating via the Azure CLI + +`DefaultAzureCredential` and `AzureCLICredential` can authenticate as the user +signed in to the [Azure CLI](https://docs.microsoft.com/cli/azure). To sign in to the Azure CLI, run `az login`. On a system with a default web browser, the Azure CLI will launch the browser to authenticate a user. + +When no default browser is available, `az login` will use the device code +authentication flow. This can also be selected manually by running `az login --use-device-code`. + +## Key concepts + +### Credentials + +A credential is a type which contains or can obtain the data needed for a +service client to authenticate requests. Service clients across the Azure SDK +accept a credential instance when they are constructed, and use that credential +to authenticate requests. + +The `azidentity` module focuses on OAuth authentication with Azure Active +Directory (AAD). It offers a variety of credential types capable of acquiring +an Azure AD access token. See [Credential Types](#credential-types "Credential Types") for a list of this module's credential types. + +### DefaultAzureCredential + +`DefaultAzureCredential` is appropriate for most apps that will be deployed to Azure. It combines common production credentials with development credentials. It attempts to authenticate via the following mechanisms in this order, stopping when one succeeds: + +![DefaultAzureCredential authentication flow](img/mermaidjs/DefaultAzureCredentialAuthFlow.svg) + +1. **Environment** - `DefaultAzureCredential` will read account information specified via [environment variables](#environment-variables) and use it to authenticate. +1. **Workload Identity** - If the app is deployed on Kubernetes with environment variables set by the workload identity webhook, `DefaultAzureCredential` will authenticate the configured identity. +1. **Managed Identity** - If the app is deployed to an Azure host with managed identity enabled, `DefaultAzureCredential` will authenticate with it. +1. **Azure CLI** - If a user or service principal has authenticated via the Azure CLI `az login` command, `DefaultAzureCredential` will authenticate that identity. + +> Note: `DefaultAzureCredential` is intended to simplify getting started with the SDK by handling common scenarios with reasonable default behaviors. Developers who want more control or whose scenario isn't served by the default settings should use other credential types. 
+ +## Managed Identity + +`DefaultAzureCredential` and `ManagedIdentityCredential` support +[managed identity authentication](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview) +in any hosting environment which supports managed identities, such as (this list is not exhaustive): +* [Azure App Service](https://docs.microsoft.com/azure/app-service/overview-managed-identity) +* [Azure Arc](https://docs.microsoft.com/azure/azure-arc/servers/managed-identity-authentication) +* [Azure Cloud Shell](https://docs.microsoft.com/azure/cloud-shell/msi-authorization) +* [Azure Kubernetes Service](https://docs.microsoft.com/azure/aks/use-managed-identity) +* [Azure Service Fabric](https://docs.microsoft.com/azure/service-fabric/concepts-managed-identity) +* [Azure Virtual Machines](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token) + +## Examples + +- [Authenticate with DefaultAzureCredential](#authenticate-with-defaultazurecredential "Authenticate with DefaultAzureCredential") +- [Define a custom authentication flow with ChainedTokenCredential](#define-a-custom-authentication-flow-with-chainedtokencredential "Define a custom authentication flow with ChainedTokenCredential") +- [Specify a user-assigned managed identity for DefaultAzureCredential](#specify-a-user-assigned-managed-identity-for-defaultazurecredential) + +### Authenticate with DefaultAzureCredential + +This example demonstrates authenticating a client from the `armresources` module with `DefaultAzureCredential`. + +```go +cred, err := azidentity.NewDefaultAzureCredential(nil) +if err != nil { + // handle error +} + +client := armresources.NewResourceGroupsClient("subscription ID", cred, nil) +``` + +### Specify a user-assigned managed identity for DefaultAzureCredential + +To configure `DefaultAzureCredential` to authenticate a user-assigned managed identity, set the environment variable `AZURE_CLIENT_ID` to the identity's client ID. + +### Define a custom authentication flow with `ChainedTokenCredential` + +`DefaultAzureCredential` is generally the quickest way to get started developing apps for Azure. For more advanced scenarios, `ChainedTokenCredential` links multiple credential instances to be tried sequentially when authenticating. It will try each chained credential in turn until one provides a token or fails to authenticate due to an error. + +The following example demonstrates creating a credential, which will attempt to authenticate using managed identity. It will fall back to authenticating via the Azure CLI when a managed identity is unavailable. 
+ +```go +managed, err := azidentity.NewManagedIdentityCredential(nil) +if err != nil { + // handle error +} +azCLI, err := azidentity.NewAzureCLICredential(nil) +if err != nil { + // handle error +} +chain, err := azidentity.NewChainedTokenCredential([]azcore.TokenCredential{managed, azCLI}, nil) +if err != nil { + // handle error +} + +client := armresources.NewResourceGroupsClient("subscription ID", chain, nil) +``` + +## Credential Types + +### Authenticating Azure Hosted Applications + +|Credential|Usage +|-|- +|[DefaultAzureCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DefaultAzureCredential)|Simplified authentication experience for getting started developing Azure apps +|[ChainedTokenCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ChainedTokenCredential)|Define custom authentication flows, composing multiple credentials +|[EnvironmentCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#EnvironmentCredential)|Authenticate a service principal or user configured by environment variables +|[ManagedIdentityCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ManagedIdentityCredential)|Authenticate the managed identity of an Azure resource +|[WorkloadIdentityCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#WorkloadIdentityCredential)|Authenticate a workload identity on Kubernetes + +### Authenticating Service Principals + +|Credential|Usage +|-|- +|[ClientAssertionCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientAssertionCredential)|Authenticate a service principal with a signed client assertion +|[ClientCertificateCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientCertificateCredential)|Authenticate a service principal with a certificate +|[ClientSecretCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientSecretCredential)|Authenticate a service principal with a secret + +### Authenticating Users + +|Credential|Usage +|-|- +|[InteractiveBrowserCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#InteractiveBrowserCredential)|Interactively authenticate a user with the default web browser +|[DeviceCodeCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DeviceCodeCredential)|Interactively authenticate a user on a device with limited UI +|[UsernamePasswordCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#UsernamePasswordCredential)|Authenticate a user with a username and password + +### Authenticating via Development Tools + +|Credential|Usage +|-|- +|[AzureCLICredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzureCLICredential)|Authenticate as the user signed in to the Azure CLI + +## Environment Variables + +`DefaultAzureCredential` and `EnvironmentCredential` can be configured with environment variables. 
Each type of authentication requires values for specific variables: + +#### Service principal with secret + +|variable name|value +|-|- +|`AZURE_CLIENT_ID`|ID of an Azure Active Directory application +|`AZURE_TENANT_ID`|ID of the application's Azure Active Directory tenant +|`AZURE_CLIENT_SECRET`|one of the application's client secrets + +#### Service principal with certificate + +|variable name|value +|-|- +|`AZURE_CLIENT_ID`|ID of an Azure Active Directory application +|`AZURE_TENANT_ID`|ID of the application's Azure Active Directory tenant +|`AZURE_CLIENT_CERTIFICATE_PATH`|path to a certificate file including private key +|`AZURE_CLIENT_CERTIFICATE_PASSWORD`|password of the certificate file, if any + +#### Username and password + +|variable name|value +|-|- +|`AZURE_CLIENT_ID`|ID of an Azure Active Directory application +|`AZURE_USERNAME`|a username (usually an email address) +|`AZURE_PASSWORD`|that user's password + +Configuration is attempted in the above order. For example, if values for a +client secret and certificate are both present, the client secret will be used. + +## Troubleshooting + +### Error Handling + +Credentials return an `error` when they fail to authenticate or lack data they require to authenticate. For guidance on resolving errors from specific credential types, see the [troubleshooting guide](https://aka.ms/azsdk/go/identity/troubleshoot). + +For more details on handling specific Azure Active Directory errors please refer to the +Azure Active Directory +[error code documentation](https://docs.microsoft.com/azure/active-directory/develop/reference-aadsts-error-codes). + +### Logging + +This module uses the classification-based logging implementation in `azcore`. To enable console logging for all SDK modules, set `AZURE_SDK_GO_LOGGING` to `all`. Use the `azcore/log` package to control log event output or to enable logs for `azidentity` only. For example: +```go +import azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" + +// print log output to stdout +azlog.SetListener(func(event azlog.Event, s string) { + fmt.Println(s) +}) + +// include only azidentity credential logs +azlog.SetEvents(azidentity.EventAuthentication) +``` + +Credentials log basic information only, such as `GetToken` success or failure and errors. These log entries don't contain authentication secrets but may contain sensitive information. + +## Next steps + +Client and management modules listed on the [Azure SDK releases page](https://azure.github.io/azure-sdk/releases/latest/go.html) support authenticating with `azidentity` credential types. You can learn more about using these libraries in their documentation, which is linked from the release page. + +## Provide Feedback + +If you encounter bugs or have suggestions, please +[open an issue](https://github.com/Azure/azure-sdk-for-go/issues). + +## Contributing + +This project welcomes contributions and suggestions. Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. +For details, visit [https://cla.microsoft.com](https://cla.microsoft.com). + +When you submit a pull request, a CLA-bot will automatically determine whether +you need to provide a CLA and decorate the PR appropriately (e.g., label, +comment). Simply follow the instructions provided by the bot. You will only +need to do this once across all repos using our CLA. 
+ +This project has adopted the +[Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information, see the +[Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) +or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any +additional questions or comments. + +![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-go%2Fsdk%2Fazidentity%2FREADME.png) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md new file mode 100644 index 00000000..7b7515eb --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md @@ -0,0 +1,205 @@ +# Troubleshoot Azure Identity authentication issues + +This troubleshooting guide covers failure investigation techniques, common errors for the credential types in the `azidentity` module, and mitigation steps to resolve these errors. + +## Table of contents + +- [Handle azidentity errors](#handle-azidentity-errors) + - [Permission issues](#permission-issues) +- [Find relevant information in errors](#find-relevant-information-in-errors) +- [Enable and configure logging](#enable-and-configure-logging) +- [Troubleshoot AzureCliCredential authentication issues](#troubleshoot-azureclicredential-authentication-issues) +- [Troubleshoot ClientCertificateCredential authentication issues](#troubleshoot-clientcertificatecredential-authentication-issues) +- [Troubleshoot ClientSecretCredential authentication issues](#troubleshoot-clientsecretcredential-authentication-issues) +- [Troubleshoot DefaultAzureCredential authentication issues](#troubleshoot-defaultazurecredential-authentication-issues) +- [Troubleshoot EnvironmentCredential authentication issues](#troubleshoot-environmentcredential-authentication-issues) +- [Troubleshoot ManagedIdentityCredential authentication issues](#troubleshoot-managedidentitycredential-authentication-issues) + - [Azure App Service and Azure Functions managed identity](#azure-app-service-and-azure-functions-managed-identity) + - [Azure Kubernetes Service managed identity](#azure-kubernetes-service-managed-identity) + - [Azure Virtual Machine managed identity](#azure-virtual-machine-managed-identity) +- [Troubleshoot UsernamePasswordCredential authentication issues](#troubleshoot-usernamepasswordcredential-authentication-issues) +- [Troubleshoot WorkloadIdentityCredential authentication issues](#troubleshoot-workloadidentitycredential-authentication-issues) +- [Get additional help](#get-additional-help) + +## Handle azidentity errors + +Any service client method that makes a request to the service may return an error due to authentication failure. This is because the credential authenticates on the first call to the service and on any subsequent call that needs to refresh an access token. Authentication errors include a description of the failure and possibly an error message from Azure Active Directory (Azure AD). Depending on the application, these errors may or may not be recoverable. + +### Permission issues + +Service client errors with a status code of 401 or 403 often indicate that authentication succeeded but the caller doesn't have permission to access the specified API. Check the service documentation to determine which RBAC roles are needed for the request, and ensure the authenticated user or service principal has the appropriate role assignments. 
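+
+For example, a minimal sketch that distinguishes the two cases (the service call that produced `err` is assumed; authentication failures are typically surfaced as `azidentity.AuthenticationFailedError`, and service responses as `azcore.ResponseError`):
+
+```go
+import (
+	"errors"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+)
+
+// classifyError reports whether an error from a service client call is an authentication
+// failure, a permission (RBAC) problem, or something unrelated.
+func classifyError(err error) string {
+	var authErr *azidentity.AuthenticationFailedError
+	if errors.As(err, &authErr) {
+		// the credential couldn't acquire a token; the error message includes any Azure AD response
+		return "authentication failed: " + authErr.Error()
+	}
+	var respErr *azcore.ResponseError
+	if errors.As(err, &respErr) && (respErr.StatusCode == 401 || respErr.StatusCode == 403) {
+		// authentication succeeded but the identity lacks a required role assignment
+		return "permission denied: " + respErr.ErrorCode
+	}
+	return "unrelated error"
+}
+```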
+ +## Find relevant information in errors + +Authentication errors can include responses from Azure AD and often contain information helpful in diagnosis. Consider the following error message: + +``` +ClientSecretCredential authentication failed +POST https://login.microsoftonline.com/3c631bb7-a9f7-4343-a5ba-a615913/oauth2/v2.0/token +-------------------------------------------------------------------------------- +RESPONSE 401 Unauthorized +-------------------------------------------------------------------------------- +{ + "error": "invalid_client", + "error_description": "AADSTS7000215: Invalid client secret provided. Ensure the secret being sent in the request is the client secret value, not the client secret ID, for a secret added to app '86be4c01-505b-45e9-bfc0-9b825fd84'.\r\nTrace ID: 03da4b8e-5ffe-48ca-9754-aff4276f0100\r\nCorrelation ID: 7b12f9bb-2eef-42e3-ad75-eee69ec9088d\r\nTimestamp: 2022-03-02 18:25:26Z", + "error_codes": [ + 7000215 + ], + "timestamp": "2022-03-02 18:25:26Z", + "trace_id": "03da4b8e-5ffe-48ca-9754-aff4276f0100", + "correlation_id": "7b12f9bb-2eef-42e3-ad75-eee69ec9088d", + "error_uri": "https://login.microsoftonline.com/error?code=7000215" +} +-------------------------------------------------------------------------------- +``` + +This error contains several pieces of information: + +- __Failing Credential Type__: The type of credential that failed to authenticate. This can be helpful when diagnosing issues with chained credential types such as `DefaultAzureCredential` or `ChainedTokenCredential`. + +- __Azure AD Error Code and Message__: The error code and message returned by Azure AD. This can give insight into the specific reason the request failed. For instance, in this case authentication failed because the provided client secret is incorrect. [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/reference-aadsts-error-codes#aadsts-error-codes) has more information on AADSTS error codes. + +- __Correlation ID and Timestamp__: The correlation ID and timestamp identify the request in server-side logs. This information can be useful to support engineers diagnosing unexpected Azure AD failures. + +### Enable and configure logging + +`azidentity` provides the same logging capabilities as the rest of the Azure SDK. The simplest way to see the logs to help debug authentication issues is to print credential logs to the console. +```go +import azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" + +// print log output to stdout +azlog.SetListener(func(event azlog.Event, s string) { + fmt.Println(s) +}) + +// include only azidentity credential logs +azlog.SetEvents(azidentity.EventAuthentication) +``` + +## Troubleshoot DefaultAzureCredential authentication issues + +| Error |Description| Mitigation | +|---|---|---| +|"DefaultAzureCredential failed to acquire a token"|No credential in the `DefaultAzureCredential` chain provided a token|
  • [Enable logging](#enable-and-configure-logging) to get further diagnostic information.
  • Consult the troubleshooting guide for underlying credential types for more information.
    • [EnvironmentCredential](#troubleshoot-environmentcredential-authentication-issues)
    • [ManagedIdentityCredential](#troubleshoot-managedidentitycredential-authentication-issues)
    • [AzureCLICredential](#troubleshoot-azureclicredential-authentication-issues)
    | +|Error from the client with a status code of 401 or 403|Authentication succeeded but the authorizing Azure service responded with a 401 (Unauthorized), or 403 (Forbidden) status code|
    • [Enable logging](#enable-and-configure-logging) to determine which credential in the chain returned the authenticating token.
    • If an unexpected credential is returning a token, check application configuration such as environment variables.
    • Ensure the correct role is assigned to the authenticated identity. For example, a service specific role rather than the subscription Owner role.
    |
+
+## Troubleshoot EnvironmentCredential authentication issues
+
+| Error Message |Description| Mitigation |
+|---|---|---|
+|Missing or incomplete environment variable configuration|A valid combination of environment variables wasn't set|Ensure the appropriate environment variables are set for the intended authentication method as described in the [module documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#EnvironmentCredential)|
+
+
+## Troubleshoot ClientSecretCredential authentication issues
+
+| Error Code | Issue | Mitigation |
+|---|---|---|
+|AADSTS7000215|An invalid client secret was provided.|Ensure the secret provided to the credential constructor is valid. If unsure, create a new client secret using the Azure portal. Details on creating a new client secret are in [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).|
+|AADSTS7000222|An expired client secret was provided.|Create a new client secret using the Azure portal. Details on creating a new client secret are in [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).|
+|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Azure AD instructions](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).|
+
+
+## Troubleshoot ClientCertificateCredential authentication issues
+
+| Error Code | Description | Mitigation |
+|---|---|---|
+|AADSTS700027|Client assertion contains an invalid signature.|Ensure the specified certificate has been uploaded to the application registration as described in [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-1-upload-a-certificate).|
+|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Azure AD instructions](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).|
+
+
+## Troubleshoot UsernamePasswordCredential authentication issues
+
+| Error Code | Issue | Mitigation |
+|---|---|---|
+|AADSTS50126|The provided username or password is invalid.|Ensure the username and password provided to the credential constructor are valid.|
+
+
+## Troubleshoot ManagedIdentityCredential authentication issues
+
+`ManagedIdentityCredential` is designed to work on a variety of Azure hosts that support managed identity. Configuration and troubleshooting vary from host to host. The table below lists the Azure hosts that can be assigned a managed identity and are supported by `ManagedIdentityCredential`.
+ +|Host Environment| | | +|---|---|---| +|Azure Virtual Machines and Scale Sets|[Configuration](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm)|[Troubleshooting](#azure-virtual-machine-managed-identity)| +|Azure App Service and Azure Functions|[Configuration](https://docs.microsoft.com/azure/app-service/overview-managed-identity)|[Troubleshooting](#azure-app-service-and-azure-functions-managed-identity)| +|Azure Kubernetes Service|[Configuration](https://azure.github.io/aad-pod-identity/docs/)|[Troubleshooting](#azure-kubernetes-service-managed-identity)| +|Azure Arc|[Configuration](https://docs.microsoft.com/azure/azure-arc/servers/managed-identity-authentication)|| +|Azure Service Fabric|[Configuration](https://docs.microsoft.com/azure/service-fabric/concepts-managed-identity)|| + +### Azure Virtual Machine managed identity + +| Error Message |Description| Mitigation | +|---|---|---| +|The requested identity hasn’t been assigned to this resource.|The IMDS endpoint responded with a status code of 400, indicating the requested identity isn’t assigned to the VM.|If using a user assigned identity, ensure the specified ID is correct.

    If using a system assigned identity, make sure it has been enabled as described in [managed identity documentation](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm#enable-system-assigned-managed-identity-on-an-existing-vm).| +|The request failed due to a gateway error.|The request to the IMDS endpoint failed due to a gateway error, 502 or 504 status code.|IMDS doesn't support requests via proxy or gateway. Disable proxies or gateways running on the VM for requests to the IMDS endpoint `http://169.254.169.254`| +|No response received from the managed identity endpoint.|No response was received for the request to IMDS or the request timed out.|

    • Ensure the VM is configured for managed identity as described in [managed identity documentation](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm).
    • Verify the IMDS endpoint is reachable on the VM. See [below](#verify-imds-is-available-on-the-vm) for instructions.
    | +|Multiple attempts failed to obtain a token from the managed identity endpoint.|The credential has exhausted its retries for a token request.|
    • Refer to the error message for more details on specific failures.
    • Ensure the VM is configured for managed identity as described in [managed identity documentation](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm).
    • Verify the IMDS endpoint is reachable on the VM. See [below](#verify-imds-is-available-on-the-vm) for instructions.
    | + +#### Verify IMDS is available on the VM + +If you have access to the VM, you can use `curl` to verify the managed identity endpoint is available. + +```sh +curl 'http://169.254.169.254/metadata/identity/oauth2/token?resource=https://management.core.windows.net&api-version=2018-02-01' -H "Metadata: true" +``` + +> This command's output will contain an access token and SHOULD NOT BE SHARED, to avoid compromising account security. + +### Azure App Service and Azure Functions managed identity + +| Error Message |Description| Mitigation | +|---|---|---| +|Get "`http://169.254.169.254/...`" i/o timeout|The App Service host hasn't set environment variables for managed identity configuration.|
    • Ensure the App Service is configured for managed identity as described in [App Service documentation](https://docs.microsoft.com/azure/app-service/overview-managed-identity).
    • Verify the App Service environment is properly configured and the managed identity endpoint is available. See [below](#verify-the-app-service-managed-identity-endpoint-is-available) for instructions.
    | + +#### Verify the App Service managed identity endpoint is available + +If you can SSH into the App Service, you can verify managed identity is available in the environment. First ensure the environment variables `IDENTITY_ENDPOINT` and `IDENTITY_SECRET` are set. Then you can verify the managed identity endpoint is available using `curl`. + +```sh +curl "$IDENTITY_ENDPOINT?resource=https://management.core.windows.net&api-version=2019-08-01" -H "X-IDENTITY-HEADER: $IDENTITY_HEADER" +``` + +> This command's output will contain an access token and SHOULD NOT BE SHARED, to avoid compromising account security. + +### Azure Kubernetes Service managed identity + +#### Pod Identity + +| Error Message |Description| Mitigation | +|---|---|---| +|"no azure identity found for request clientID"|The application attempted to authenticate before an identity was assigned to its pod|Verify the pod is labeled correctly. This also occurs when a correctly labeled pod authenticates before the identity is ready. To prevent initialization races, configure NMI to set the Retry-After header in its responses as described in [Pod Identity documentation](https://azure.github.io/aad-pod-identity/docs/configure/feature_flags/#set-retry-after-header-in-nmi-response). + + +## Troubleshoot AzureCliCredential authentication issues + +| Error Message |Description| Mitigation | +|---|---|---| +|Azure CLI not found on path|The Azure CLI isn’t installed or isn't on the application's path.|
    • Ensure the Azure CLI is installed as described in [Azure CLI documentation](https://docs.microsoft.com/cli/azure/install-azure-cli).
    • Validate the installation location is in the application's `PATH` environment variable.
    | +|Please run 'az login' to set up account|No account is currently logged into the Azure CLI, or the login has expired.|
    • Run `az login` to log into the Azure CLI. More information about Azure CLI authentication is available in the [Azure CLI documentation](https://docs.microsoft.com/cli/azure/authenticate-azure-cli).
    • Verify that the Azure CLI can obtain tokens. See [below](#verify-the-azure-cli-can-obtain-tokens) for instructions.
    | + +#### Verify the Azure CLI can obtain tokens + +You can manually verify that the Azure CLI can authenticate and obtain tokens. First, use the `account` command to verify the logged in account. + +```azurecli +az account show +``` + +Once you've verified the Azure CLI is using the correct account, you can validate that it's able to obtain tokens for that account. + +```azurecli +az account get-access-token --output json --resource https://management.core.windows.net +``` + +> This command's output will contain an access token and SHOULD NOT BE SHARED, to avoid compromising account security. + + +## Troubleshoot `WorkloadIdentityCredential` authentication issues + +| Error Message |Description| Mitigation | +|---|---|---| +|no client ID/tenant ID/token file specified|Incomplete configuration|In most cases these values are provided via environment variables set by Azure Workload Identity.
    • If your application runs on Azure Kubernetes Service (AKS) or a cluster that has deployed the Azure Workload Identity admission webhook, check pod labels and service account configuration. See the [AKS documentation](https://learn.microsoft.com/azure/aks/workload-identity-deploy-cluster#disable-workload-identity) and [Azure Workload Identity troubleshooting guide](https://azure.github.io/azure-workload-identity/docs/troubleshooting.html) for more details.
    • If your application isn't running on AKS or your cluster hasn't deployed the Workload Identity admission webhook, set these values in `WorkloadIdentityCredentialOptions` + +## Get additional help + +Additional information on ways to reach out for support can be found in [SUPPORT.md](https://github.com/Azure/azure-sdk-for-go/blob/main/SUPPORT.md). diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json new file mode 100644 index 00000000..47e77f88 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json @@ -0,0 +1,6 @@ +{ + "AssetsRepo": "Azure/azure-sdk-assets", + "AssetsRepoPrefixPath": "go", + "TagPrefix": "go/azidentity", + "Tag": "go/azidentity_6225ab0470" +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go new file mode 100644 index 00000000..739ff49c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go @@ -0,0 +1,190 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "bytes" + "context" + "errors" + "io" + "net/http" + "net/url" + "os" + "regexp" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" +) + +const ( + azureAdditionallyAllowedTenants = "AZURE_ADDITIONALLY_ALLOWED_TENANTS" + azureAuthorityHost = "AZURE_AUTHORITY_HOST" + azureClientCertificatePassword = "AZURE_CLIENT_CERTIFICATE_PASSWORD" + azureClientCertificatePath = "AZURE_CLIENT_CERTIFICATE_PATH" + azureClientID = "AZURE_CLIENT_ID" + azureClientSecret = "AZURE_CLIENT_SECRET" + azureFederatedTokenFile = "AZURE_FEDERATED_TOKEN_FILE" + azurePassword = "AZURE_PASSWORD" + azureRegionalAuthorityName = "AZURE_REGIONAL_AUTHORITY_NAME" + azureTenantID = "AZURE_TENANT_ID" + azureUsername = "AZURE_USERNAME" + + organizationsTenantID = "organizations" + developerSignOnClientID = "04b07795-8ddb-461a-bbee-02f9e1bf7b46" + defaultSuffix = "/.default" + tenantIDValidationErr = "invalid tenantID. 
You can locate your tenantID by following the instructions listed here: https://docs.microsoft.com/partner-center/find-ids-and-domain-names" +) + +var ( + // capability CP1 indicates the client application is capable of handling CAE claims challenges + cp1 = []string{"CP1"} + // CP1 is disabled until CAE support is added back + disableCP1 = true +) + +var getConfidentialClient = func(clientID, tenantID string, cred confidential.Credential, co *azcore.ClientOptions, additionalOpts ...confidential.Option) (confidentialClient, error) { + if !validTenantID(tenantID) { + return confidential.Client{}, errors.New(tenantIDValidationErr) + } + authorityHost, err := setAuthorityHost(co.Cloud) + if err != nil { + return confidential.Client{}, err + } + authority := runtime.JoinPaths(authorityHost, tenantID) + o := []confidential.Option{ + confidential.WithAzureRegion(os.Getenv(azureRegionalAuthorityName)), + confidential.WithHTTPClient(newPipelineAdapter(co)), + } + if !disableCP1 { + o = append(o, confidential.WithClientCapabilities(cp1)) + } + o = append(o, additionalOpts...) + if strings.ToLower(tenantID) == "adfs" { + o = append(o, confidential.WithInstanceDiscovery(false)) + } + return confidential.New(authority, clientID, cred, o...) +} + +var getPublicClient = func(clientID, tenantID string, co *azcore.ClientOptions, additionalOpts ...public.Option) (public.Client, error) { + if !validTenantID(tenantID) { + return public.Client{}, errors.New(tenantIDValidationErr) + } + authorityHost, err := setAuthorityHost(co.Cloud) + if err != nil { + return public.Client{}, err + } + o := []public.Option{ + public.WithAuthority(runtime.JoinPaths(authorityHost, tenantID)), + public.WithHTTPClient(newPipelineAdapter(co)), + } + if !disableCP1 { + o = append(o, public.WithClientCapabilities(cp1)) + } + o = append(o, additionalOpts...) + if strings.ToLower(tenantID) == "adfs" { + o = append(o, public.WithInstanceDiscovery(false)) + } + return public.New(clientID, o...) +} + +// setAuthorityHost initializes the authority host for credentials. Precedence is: +// 1. cloud.Configuration.ActiveDirectoryAuthorityHost value set by user +// 2. value of AZURE_AUTHORITY_HOST +// 3. 
default: Azure Public Cloud +func setAuthorityHost(cc cloud.Configuration) (string, error) { + host := cc.ActiveDirectoryAuthorityHost + if host == "" { + if len(cc.Services) > 0 { + return "", errors.New("missing ActiveDirectoryAuthorityHost for specified cloud") + } + host = cloud.AzurePublic.ActiveDirectoryAuthorityHost + if envAuthorityHost := os.Getenv(azureAuthorityHost); envAuthorityHost != "" { + host = envAuthorityHost + } + } + u, err := url.Parse(host) + if err != nil { + return "", err + } + if u.Scheme != "https" { + return "", errors.New("cannot use an authority host without https") + } + return host, nil +} + +// validTenantID return true is it receives a valid tenantID, returns false otherwise +func validTenantID(tenantID string) bool { + match, err := regexp.MatchString("^[0-9a-zA-Z-.]+$", tenantID) + if err != nil { + return false + } + return match +} + +func newPipelineAdapter(opts *azcore.ClientOptions) pipelineAdapter { + pl := runtime.NewPipeline(component, version, runtime.PipelineOptions{}, opts) + return pipelineAdapter{pl: pl} +} + +type pipelineAdapter struct { + pl runtime.Pipeline +} + +func (p pipelineAdapter) CloseIdleConnections() { + // do nothing +} + +func (p pipelineAdapter) Do(r *http.Request) (*http.Response, error) { + req, err := runtime.NewRequest(r.Context(), r.Method, r.URL.String()) + if err != nil { + return nil, err + } + if r.Body != nil && r.Body != http.NoBody { + // create a rewindable body from the existing body as required + var body io.ReadSeekCloser + if rsc, ok := r.Body.(io.ReadSeekCloser); ok { + body = rsc + } else { + b, err := io.ReadAll(r.Body) + if err != nil { + return nil, err + } + body = streaming.NopCloser(bytes.NewReader(b)) + } + err = req.SetBody(body, r.Header.Get("Content-Type")) + if err != nil { + return nil, err + } + } + resp, err := p.pl.Do(req) + if err != nil { + return nil, err + } + return resp, err +} + +// enables fakes for test scenarios +type confidentialClient interface { + AcquireTokenSilent(ctx context.Context, scopes []string, options ...confidential.AcquireSilentOption) (confidential.AuthResult, error) + AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, options ...confidential.AcquireByAuthCodeOption) (confidential.AuthResult, error) + AcquireTokenByCredential(ctx context.Context, scopes []string, options ...confidential.AcquireByCredentialOption) (confidential.AuthResult, error) + AcquireTokenOnBehalfOf(ctx context.Context, userAssertion string, scopes []string, options ...confidential.AcquireOnBehalfOfOption) (confidential.AuthResult, error) +} + +// enables fakes for test scenarios +type publicClient interface { + AcquireTokenSilent(ctx context.Context, scopes []string, options ...public.AcquireSilentOption) (public.AuthResult, error) + AcquireTokenByUsernamePassword(ctx context.Context, scopes []string, username string, password string, options ...public.AcquireByUsernamePasswordOption) (public.AuthResult, error) + AcquireTokenByDeviceCode(ctx context.Context, scopes []string, options ...public.AcquireByDeviceCodeOption) (public.DeviceCode, error) + AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, options ...public.AcquireByAuthCodeOption) (public.AuthResult, error) + AcquireTokenInteractive(ctx context.Context, scopes []string, options ...public.AcquireInteractiveOption) (public.AuthResult, error) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go 
b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go new file mode 100644 index 00000000..33ff13c0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go @@ -0,0 +1,180 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "os" + "os/exec" + "regexp" + "runtime" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +const ( + credNameAzureCLI = "AzureCLICredential" + timeoutCLIRequest = 10 * time.Second +) + +// used by tests to fake invoking the CLI +type azureCLITokenProvider func(ctx context.Context, resource string, tenantID string) ([]byte, error) + +// AzureCLICredentialOptions contains optional parameters for AzureCLICredential. +type AzureCLICredentialOptions struct { + // AdditionallyAllowedTenants specifies tenants for which the credential may acquire tokens, in addition + // to TenantID. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the + // logged in account can access. + AdditionallyAllowedTenants []string + // TenantID identifies the tenant the credential should authenticate in. + // Defaults to the CLI's default tenant, which is typically the home tenant of the logged in user. + TenantID string + + tokenProvider azureCLITokenProvider +} + +// init returns an instance of AzureCLICredentialOptions initialized with default values. +func (o *AzureCLICredentialOptions) init() { + if o.tokenProvider == nil { + o.tokenProvider = defaultTokenProvider() + } +} + +// AzureCLICredential authenticates as the identity logged in to the Azure CLI. +type AzureCLICredential struct { + s *syncer + tokenProvider azureCLITokenProvider +} + +// NewAzureCLICredential constructs an AzureCLICredential. Pass nil to accept default options. +func NewAzureCLICredential(options *AzureCLICredentialOptions) (*AzureCLICredential, error) { + cp := AzureCLICredentialOptions{} + if options != nil { + cp = *options + } + cp.init() + c := AzureCLICredential{tokenProvider: cp.tokenProvider} + c.s = newSyncer(credNameAzureCLI, cp.TenantID, cp.AdditionallyAllowedTenants, c.requestToken, c.requestToken) + return &c, nil +} + +// GetToken requests a token from the Azure CLI. This credential doesn't cache tokens, so every call invokes the CLI. +// This method is called automatically by Azure SDK clients. 
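+//
+// For example, a request for the scope "https://vault.azure.net/.default" results in the CLI command
+// "az account get-access-token -o json --resource https://vault.azure.net" (see defaultTokenProvider
+// below for the exact command construction, including the optional --tenant argument).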
+func (c *AzureCLICredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if len(opts.Scopes) != 1 { + return azcore.AccessToken{}, errors.New(credNameAzureCLI + ": GetToken() requires exactly one scope") + } + // CLI expects an AAD v1 resource, not a v2 scope + opts.Scopes = []string{strings.TrimSuffix(opts.Scopes[0], defaultSuffix)} + return c.s.GetToken(ctx, opts) +} + +func (c *AzureCLICredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + b, err := c.tokenProvider(ctx, opts.Scopes[0], opts.TenantID) + if err != nil { + return azcore.AccessToken{}, err + } + at, err := c.createAccessToken(b) + if err != nil { + return azcore.AccessToken{}, err + } + return at, nil +} + +func defaultTokenProvider() func(ctx context.Context, resource string, tenantID string) ([]byte, error) { + return func(ctx context.Context, resource string, tenantID string) ([]byte, error) { + match, err := regexp.MatchString("^[0-9a-zA-Z-.:/]+$", resource) + if err != nil { + return nil, err + } + if !match { + return nil, fmt.Errorf(`%s: unexpected scope "%s". Only alphanumeric characters and ".", ";", "-", and "/" are allowed`, credNameAzureCLI, resource) + } + + // set a default timeout for this authentication iff the application hasn't done so already + var cancel context.CancelFunc + if _, hasDeadline := ctx.Deadline(); !hasDeadline { + ctx, cancel = context.WithTimeout(ctx, timeoutCLIRequest) + defer cancel() + } + + commandLine := "az account get-access-token -o json --resource " + resource + if tenantID != "" { + commandLine += " --tenant " + tenantID + } + var cliCmd *exec.Cmd + if runtime.GOOS == "windows" { + dir := os.Getenv("SYSTEMROOT") + if dir == "" { + return nil, newCredentialUnavailableError(credNameAzureCLI, "environment variable 'SYSTEMROOT' has no value") + } + cliCmd = exec.CommandContext(ctx, "cmd.exe", "/c", commandLine) + cliCmd.Dir = dir + } else { + cliCmd = exec.CommandContext(ctx, "/bin/sh", "-c", commandLine) + cliCmd.Dir = "/bin" + } + cliCmd.Env = os.Environ() + var stderr bytes.Buffer + cliCmd.Stderr = &stderr + + output, err := cliCmd.Output() + if err != nil { + msg := stderr.String() + var exErr *exec.ExitError + if errors.As(err, &exErr) && exErr.ExitCode() == 127 || strings.HasPrefix(msg, "'az' is not recognized") { + msg = "Azure CLI not found on path" + } + if msg == "" { + msg = err.Error() + } + return nil, newCredentialUnavailableError(credNameAzureCLI, msg) + } + + return output, nil + } +} + +func (c *AzureCLICredential) createAccessToken(tk []byte) (azcore.AccessToken, error) { + t := struct { + AccessToken string `json:"accessToken"` + Authority string `json:"_authority"` + ClientID string `json:"_clientId"` + ExpiresOn string `json:"expiresOn"` + IdentityProvider string `json:"identityProvider"` + IsMRRT bool `json:"isMRRT"` + RefreshToken string `json:"refreshToken"` + Resource string `json:"resource"` + TokenType string `json:"tokenType"` + UserID string `json:"userId"` + }{} + err := json.Unmarshal(tk, &t) + if err != nil { + return azcore.AccessToken{}, err + } + + // the Azure CLI's "expiresOn" is local time + exp, err := time.ParseInLocation("2006-01-02 15:04:05.999999", t.ExpiresOn, time.Local) + if err != nil { + return azcore.AccessToken{}, fmt.Errorf("Error parsing token expiration time %q: %v", t.ExpiresOn, err) + } + + converted := azcore.AccessToken{ + Token: t.AccessToken, + ExpiresOn: exp.UTC(), + } + return converted, nil +} + +var _ 
azcore.TokenCredential = (*AzureCLICredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go new file mode 100644 index 00000000..dc855edf --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go @@ -0,0 +1,138 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +// ChainedTokenCredentialOptions contains optional parameters for ChainedTokenCredential. +type ChainedTokenCredentialOptions struct { + // RetrySources configures how the credential uses its sources. When true, the credential always attempts to + // authenticate through each source in turn, stopping when one succeeds. When false, the credential authenticates + // only through this first successful source--it never again tries the sources which failed. + RetrySources bool +} + +// ChainedTokenCredential links together multiple credentials and tries them sequentially when authenticating. By default, +// it tries all the credentials until one authenticates, after which it always uses that credential. +type ChainedTokenCredential struct { + cond *sync.Cond + iterating bool + name string + retrySources bool + sources []azcore.TokenCredential + successfulCredential azcore.TokenCredential +} + +// NewChainedTokenCredential creates a ChainedTokenCredential. Pass nil for options to accept defaults. +func NewChainedTokenCredential(sources []azcore.TokenCredential, options *ChainedTokenCredentialOptions) (*ChainedTokenCredential, error) { + if len(sources) == 0 { + return nil, errors.New("sources must contain at least one TokenCredential") + } + for _, source := range sources { + if source == nil { // cannot have a nil credential in the chain or else the application will panic when GetToken() is called on nil + return nil, errors.New("sources cannot contain nil") + } + } + cp := make([]azcore.TokenCredential, len(sources)) + copy(cp, sources) + if options == nil { + options = &ChainedTokenCredentialOptions{} + } + return &ChainedTokenCredential{ + cond: sync.NewCond(&sync.Mutex{}), + name: "ChainedTokenCredential", + retrySources: options.RetrySources, + sources: cp, + }, nil +} + +// GetToken calls GetToken on the chained credentials in turn, stopping when one returns a token. +// This method is called automatically by Azure SDK clients. 
+func (c *ChainedTokenCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if !c.retrySources { + // ensure only one goroutine at a time iterates the sources and perhaps sets c.successfulCredential + c.cond.L.Lock() + for { + if c.successfulCredential != nil { + c.cond.L.Unlock() + return c.successfulCredential.GetToken(ctx, opts) + } + if !c.iterating { + c.iterating = true + // allow other goroutines to wait while this one iterates + c.cond.L.Unlock() + break + } + c.cond.Wait() + } + } + + var ( + err error + errs []error + successfulCredential azcore.TokenCredential + token azcore.AccessToken + unavailableErr *credentialUnavailableError + ) + for _, cred := range c.sources { + token, err = cred.GetToken(ctx, opts) + if err == nil { + log.Writef(EventAuthentication, "%s authenticated with %s", c.name, extractCredentialName(cred)) + successfulCredential = cred + break + } + errs = append(errs, err) + // continue to the next source iff this one returned credentialUnavailableError + if !errors.As(err, &unavailableErr) { + break + } + } + if c.iterating { + c.cond.L.Lock() + // this is nil when all credentials returned an error + c.successfulCredential = successfulCredential + c.iterating = false + c.cond.L.Unlock() + c.cond.Broadcast() + } + // err is the error returned by the last GetToken call. It will be nil when that call succeeds + if err != nil { + // return credentialUnavailableError iff all sources did so; return AuthenticationFailedError otherwise + msg := createChainedErrorMessage(errs) + if errors.As(err, &unavailableErr) { + err = newCredentialUnavailableError(c.name, msg) + } else { + res := getResponseFromError(err) + err = newAuthenticationFailedError(c.name, msg, res, err) + } + } + return token, err +} + +func createChainedErrorMessage(errs []error) string { + msg := "failed to acquire a token.\nAttempted credentials:" + for _, err := range errs { + msg += fmt.Sprintf("\n\t%s", err.Error()) + } + return msg +} + +func extractCredentialName(credential azcore.TokenCredential) string { + return strings.TrimPrefix(fmt.Sprintf("%T", credential), "*azidentity.") +} + +var _ azcore.TokenCredential = (*ChainedTokenCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml new file mode 100644 index 00000000..3b443e8e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml @@ -0,0 +1,47 @@ +# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file. 
+trigger: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/azidentity/ + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/azidentity/ + +stages: +- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + RunLiveTests: true + ServiceDirectory: 'azidentity' + PreSteps: + - pwsh: | + [System.Convert]::FromBase64String($env:PFX_CONTENTS) | Set-Content -Path $(Agent.TempDirectory)/test.pfx -AsByteStream + Set-Content -Path $(Agent.TempDirectory)/test.pem -Value $env:PEM_CONTENTS + [System.Convert]::FromBase64String($env:SNI_CONTENTS) | Set-Content -Path $(Agent.TempDirectory)/testsni.pfx -AsByteStream + env: + PFX_CONTENTS: $(net-identity-spcert-pfx) + PEM_CONTENTS: $(net-identity-spcert-pem) + SNI_CONTENTS: $(net-identity-spcert-sni) + EnvVars: + AZURE_IDENTITY_TEST_TENANTID: $(net-identity-tenantid) + AZURE_IDENTITY_TEST_USERNAME: $(net-identity-username) + AZURE_IDENTITY_TEST_PASSWORD: $(net-identity-password) + IDENTITY_SP_TENANT_ID: $(net-identity-sp-tenantid) + IDENTITY_SP_CLIENT_ID: $(net-identity-sp-clientid) + IDENTITY_SP_CLIENT_SECRET: $(net-identity-sp-clientsecret) + IDENTITY_SP_CERT_PEM: $(Agent.TempDirectory)/test.pem + IDENTITY_SP_CERT_PFX: $(Agent.TempDirectory)/test.pfx + IDENTITY_SP_CERT_SNI: $(Agent.TempDirectory)/testsni.pfx diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go new file mode 100644 index 00000000..d9d22996 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go @@ -0,0 +1,83 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" +) + +const credNameAssertion = "ClientAssertionCredential" + +// ClientAssertionCredential authenticates an application with assertions provided by a callback function. +// This credential is for advanced scenarios. [ClientCertificateCredential] has a more convenient API for +// the most common assertion scenario, authenticating a service principal with a certificate. See +// [Azure AD documentation] for details of the assertion format. +// +// [Azure AD documentation]: https://docs.microsoft.com/azure/active-directory/develop/active-directory-certificate-credentials#assertion-format +type ClientAssertionCredential struct { + client confidentialClient + s *syncer +} + +// ClientAssertionCredentialOptions contains optional parameters for ClientAssertionCredential. +type ClientAssertionCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. + // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the + // application is registered. + AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. 
Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool +} + +// NewClientAssertionCredential constructs a ClientAssertionCredential. The getAssertion function must be thread safe. Pass nil for options to accept defaults. +func NewClientAssertionCredential(tenantID, clientID string, getAssertion func(context.Context) (string, error), options *ClientAssertionCredentialOptions) (*ClientAssertionCredential, error) { + if getAssertion == nil { + return nil, errors.New("getAssertion must be a function that returns assertions") + } + if options == nil { + options = &ClientAssertionCredentialOptions{} + } + cred := confidential.NewCredFromAssertionCallback( + func(ctx context.Context, _ confidential.AssertionRequestOptions) (string, error) { + return getAssertion(ctx) + }, + ) + c, err := getConfidentialClient(clientID, tenantID, cred, &options.ClientOptions, confidential.WithInstanceDiscovery(!options.DisableInstanceDiscovery)) + if err != nil { + return nil, err + } + cac := ClientAssertionCredential{client: c} + cac.s = newSyncer(credNameAssertion, tenantID, options.AdditionallyAllowedTenants, cac.requestToken, cac.silentAuth) + return &cac, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +func (c *ClientAssertionCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return c.s.GetToken(ctx, opts) +} + +func (c *ClientAssertionCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, confidential.WithTenantID(opts.TenantID)) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +func (c *ClientAssertionCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenByCredential(ctx, opts.Scopes, confidential.WithTenantID(opts.TenantID)) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +var _ azcore.TokenCredential = (*ClientAssertionCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go new file mode 100644 index 00000000..804eba89 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go @@ -0,0 +1,172 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "crypto" + "crypto/x509" + "encoding/pem" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" + "golang.org/x/crypto/pkcs12" +) + +const credNameCert = "ClientCertificateCredential" + +// ClientCertificateCredentialOptions contains optional parameters for ClientCertificateCredential. +type ClientCertificateCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. 
+ // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the + // application is registered. + AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool + // SendCertificateChain controls whether the credential sends the public certificate chain in the x5c + // header of each token request's JWT. This is required for Subject Name/Issuer (SNI) authentication. + // Defaults to False. + SendCertificateChain bool +} + +// ClientCertificateCredential authenticates a service principal with a certificate. +type ClientCertificateCredential struct { + client confidentialClient + s *syncer +} + +// NewClientCertificateCredential constructs a ClientCertificateCredential. Pass nil for options to accept defaults. +func NewClientCertificateCredential(tenantID string, clientID string, certs []*x509.Certificate, key crypto.PrivateKey, options *ClientCertificateCredentialOptions) (*ClientCertificateCredential, error) { + if len(certs) == 0 { + return nil, errors.New("at least one certificate is required") + } + if options == nil { + options = &ClientCertificateCredentialOptions{} + } + cred, err := confidential.NewCredFromCert(certs, key) + if err != nil { + return nil, err + } + var o []confidential.Option + if options.SendCertificateChain { + o = append(o, confidential.WithX5C()) + } + o = append(o, confidential.WithInstanceDiscovery(!options.DisableInstanceDiscovery)) + c, err := getConfidentialClient(clientID, tenantID, cred, &options.ClientOptions, o...) + if err != nil { + return nil, err + } + cc := ClientCertificateCredential{client: c} + cc.s = newSyncer(credNameCert, tenantID, options.AdditionallyAllowedTenants, cc.requestToken, cc.silentAuth) + return &cc, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +func (c *ClientCertificateCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return c.s.GetToken(ctx, opts) +} + +func (c *ClientCertificateCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, confidential.WithTenantID(opts.TenantID)) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +func (c *ClientCertificateCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenByCredential(ctx, opts.Scopes, confidential.WithTenantID(opts.TenantID)) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +// ParseCertificates loads certificates and a private key, in PEM or PKCS12 format, for use with NewClientCertificateCredential. +// Pass nil for password if the private key isn't encrypted. This function can't decrypt keys in PEM format. 
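+//
+// A typical use (a sketch; certPath, tenantID and clientID are placeholders) reads certificate data
+// from disk and passes the results to NewClientCertificateCredential:
+//
+//	data, err := os.ReadFile(certPath)
+//	if err != nil {
+//		// handle error
+//	}
+//	certs, key, err := ParseCertificates(data, nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	cred, err := NewClientCertificateCredential(tenantID, clientID, certs, key, nil)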
+func ParseCertificates(certData []byte, password []byte) ([]*x509.Certificate, crypto.PrivateKey, error) { + var blocks []*pem.Block + var err error + if len(password) == 0 { + blocks, err = loadPEMCert(certData) + } + if len(blocks) == 0 || err != nil { + blocks, err = loadPKCS12Cert(certData, string(password)) + } + if err != nil { + return nil, nil, err + } + var certs []*x509.Certificate + var pk crypto.PrivateKey + for _, block := range blocks { + switch block.Type { + case "CERTIFICATE": + c, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, nil, err + } + certs = append(certs, c) + case "PRIVATE KEY": + if pk != nil { + return nil, nil, errors.New("certData contains multiple private keys") + } + pk, err = x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + pk, err = x509.ParsePKCS1PrivateKey(block.Bytes) + } + if err != nil { + return nil, nil, err + } + case "RSA PRIVATE KEY": + if pk != nil { + return nil, nil, errors.New("certData contains multiple private keys") + } + pk, err = x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return nil, nil, err + } + } + } + if len(certs) == 0 { + return nil, nil, errors.New("found no certificate") + } + if pk == nil { + return nil, nil, errors.New("found no private key") + } + return certs, pk, nil +} + +func loadPEMCert(certData []byte) ([]*pem.Block, error) { + blocks := []*pem.Block{} + for { + var block *pem.Block + block, certData = pem.Decode(certData) + if block == nil { + break + } + blocks = append(blocks, block) + } + if len(blocks) == 0 { + return nil, errors.New("didn't find any PEM blocks") + } + return blocks, nil +} + +func loadPKCS12Cert(certData []byte, password string) ([]*pem.Block, error) { + blocks, err := pkcs12.ToPEM(certData, password) + if err != nil { + return nil, err + } + if len(blocks) == 0 { + // not mentioning PKCS12 in this message because we end up here when certData is garbage + return nil, errors.New("didn't find any certificate content") + } + return blocks, err +} + +var _ azcore.TokenCredential = (*ClientCertificateCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go new file mode 100644 index 00000000..dda21f6b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go @@ -0,0 +1,75 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" +) + +const credNameSecret = "ClientSecretCredential" + +// ClientSecretCredentialOptions contains optional parameters for ClientSecretCredential. +type ClientSecretCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. + // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the + // application is registered. + AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. 
It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool +} + +// ClientSecretCredential authenticates an application with a client secret. +type ClientSecretCredential struct { + client confidentialClient + s *syncer +} + +// NewClientSecretCredential constructs a ClientSecretCredential. Pass nil for options to accept defaults. +func NewClientSecretCredential(tenantID string, clientID string, clientSecret string, options *ClientSecretCredentialOptions) (*ClientSecretCredential, error) { + if options == nil { + options = &ClientSecretCredentialOptions{} + } + cred, err := confidential.NewCredFromSecret(clientSecret) + if err != nil { + return nil, err + } + c, err := getConfidentialClient( + clientID, tenantID, cred, &options.ClientOptions, confidential.WithInstanceDiscovery(!options.DisableInstanceDiscovery), + ) + if err != nil { + return nil, err + } + csc := ClientSecretCredential{client: c} + csc.s = newSyncer(credNameSecret, tenantID, options.AdditionallyAllowedTenants, csc.requestToken, csc.silentAuth) + return &csc, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +func (c *ClientSecretCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return c.s.GetToken(ctx, opts) +} + +func (c *ClientSecretCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, confidential.WithTenantID(opts.TenantID)) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +func (c *ClientSecretCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenByCredential(ctx, opts.Scopes, confidential.WithTenantID(opts.TenantID)) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +var _ azcore.TokenCredential = (*ClientSecretCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go new file mode 100644 index 00000000..1e3efdc9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go @@ -0,0 +1,209 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + "os" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +// DefaultAzureCredentialOptions contains optional parameters for DefaultAzureCredential. +// These options may not apply to all credentials in the chain. +type DefaultAzureCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. Add + // the wildcard value "*" to allow the credential to acquire tokens for any tenant. 
This value can also be + // set as a semicolon delimited list of tenants in the environment variable AZURE_ADDITIONALLY_ALLOWED_TENANTS. + AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool + // TenantID identifies the tenant the Azure CLI should authenticate in. + // Defaults to the CLI's default tenant, which is typically the home tenant of the user logged in to the CLI. + TenantID string +} + +// DefaultAzureCredential is a default credential chain for applications that will deploy to Azure. +// It combines credentials suitable for deployment with credentials suitable for local development. +// It attempts to authenticate with each of these credential types, in the following order, stopping +// when one provides a token: +// +// - [EnvironmentCredential] +// - [WorkloadIdentityCredential], if environment variable configuration is set by the Azure workload +// identity webhook. Use [WorkloadIdentityCredential] directly when not using the webhook or needing +// more control over its configuration. +// - [ManagedIdentityCredential] +// - [AzureCLICredential] +// +// Consult the documentation for these credential types for more information on how they authenticate. +// Once a credential has successfully authenticated, DefaultAzureCredential will use that credential for +// every subsequent authentication. +type DefaultAzureCredential struct { + chain *ChainedTokenCredential +} + +// NewDefaultAzureCredential creates a DefaultAzureCredential. Pass nil for options to accept defaults. 
+func NewDefaultAzureCredential(options *DefaultAzureCredentialOptions) (*DefaultAzureCredential, error) { + var creds []azcore.TokenCredential + var errorMessages []string + + if options == nil { + options = &DefaultAzureCredentialOptions{} + } + additionalTenants := options.AdditionallyAllowedTenants + if len(additionalTenants) == 0 { + if tenants := os.Getenv(azureAdditionallyAllowedTenants); tenants != "" { + additionalTenants = strings.Split(tenants, ";") + } + } + + envCred, err := NewEnvironmentCredential(&EnvironmentCredentialOptions{ + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + additionallyAllowedTenants: additionalTenants, + }) + if err == nil { + creds = append(creds, envCred) + } else { + errorMessages = append(errorMessages, "EnvironmentCredential: "+err.Error()) + creds = append(creds, &defaultCredentialErrorReporter{credType: "EnvironmentCredential", err: err}) + } + + // workload identity requires values for AZURE_AUTHORITY_HOST, AZURE_CLIENT_ID, AZURE_FEDERATED_TOKEN_FILE, AZURE_TENANT_ID + wic, err := NewWorkloadIdentityCredential(&WorkloadIdentityCredentialOptions{ + AdditionallyAllowedTenants: additionalTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + }) + if err == nil { + creds = append(creds, wic) + } else { + errorMessages = append(errorMessages, credNameWorkloadIdentity+": "+err.Error()) + creds = append(creds, &defaultCredentialErrorReporter{credType: credNameWorkloadIdentity, err: err}) + } + o := &ManagedIdentityCredentialOptions{ClientOptions: options.ClientOptions} + if ID, ok := os.LookupEnv(azureClientID); ok { + o.ID = ClientID(ID) + } + miCred, err := NewManagedIdentityCredential(o) + if err == nil { + creds = append(creds, &timeoutWrapper{mic: miCred, timeout: time.Second}) + } else { + errorMessages = append(errorMessages, credNameManagedIdentity+": "+err.Error()) + creds = append(creds, &defaultCredentialErrorReporter{credType: credNameManagedIdentity, err: err}) + } + + cliCred, err := NewAzureCLICredential(&AzureCLICredentialOptions{AdditionallyAllowedTenants: additionalTenants, TenantID: options.TenantID}) + if err == nil { + creds = append(creds, cliCred) + } else { + errorMessages = append(errorMessages, credNameAzureCLI+": "+err.Error()) + creds = append(creds, &defaultCredentialErrorReporter{credType: credNameAzureCLI, err: err}) + } + + err = defaultAzureCredentialConstructorErrorHandler(len(creds), errorMessages) + if err != nil { + return nil, err + } + + chain, err := NewChainedTokenCredential(creds, nil) + if err != nil { + return nil, err + } + chain.name = "DefaultAzureCredential" + return &DefaultAzureCredential{chain: chain}, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. 
+func (c *DefaultAzureCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return c.chain.GetToken(ctx, opts) +} + +var _ azcore.TokenCredential = (*DefaultAzureCredential)(nil) + +func defaultAzureCredentialConstructorErrorHandler(numberOfSuccessfulCredentials int, errorMessages []string) (err error) { + errorMessage := strings.Join(errorMessages, "\n\t") + + if numberOfSuccessfulCredentials == 0 { + return errors.New(errorMessage) + } + + if len(errorMessages) != 0 { + log.Writef(EventAuthentication, "NewDefaultAzureCredential failed to initialize some credentials:\n\t%s", errorMessage) + } + + return nil +} + +// defaultCredentialErrorReporter is a substitute for credentials that couldn't be constructed. +// Its GetToken method always returns a credentialUnavailableError having the same message as +// the error that prevented constructing the credential. This ensures the message is present +// in the error returned by ChainedTokenCredential.GetToken() +type defaultCredentialErrorReporter struct { + credType string + err error +} + +func (d *defaultCredentialErrorReporter) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if _, ok := d.err.(*credentialUnavailableError); ok { + return azcore.AccessToken{}, d.err + } + return azcore.AccessToken{}, newCredentialUnavailableError(d.credType, d.err.Error()) +} + +var _ azcore.TokenCredential = (*defaultCredentialErrorReporter)(nil) + +// timeoutWrapper prevents a potentially very long timeout when managed identity isn't available +type timeoutWrapper struct { + mic *ManagedIdentityCredential + // timeout applies to all auth attempts until one doesn't time out + timeout time.Duration +} + +// GetToken wraps DefaultAzureCredential's initial managed identity auth attempt with a short timeout +// because managed identity may not be available and connecting to IMDS can take several minutes to time out. +func (w *timeoutWrapper) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + var tk azcore.AccessToken + var err error + // no need to synchronize around this value because it's written only within ChainedTokenCredential's critical section + if w.timeout > 0 { + c, cancel := context.WithTimeout(ctx, w.timeout) + defer cancel() + tk, err = w.mic.GetToken(c, opts) + if isAuthFailedDueToContext(err) { + err = newCredentialUnavailableError(credNameManagedIdentity, "managed identity timed out") + } else { + // some managed identity implementation is available, so don't apply the timeout to future calls + w.timeout = 0 + } + } else { + tk, err = w.mic.GetToken(ctx, opts) + } + return tk, err +} + +// unwraps nested AuthenticationFailedErrors to get the root error +func isAuthFailedDueToContext(err error) bool { + for { + var authFailedErr *AuthenticationFailedError + if !errors.As(err, &authFailedErr) { + break + } + err = authFailedErr.err + } + return errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go new file mode 100644 index 00000000..108e83c4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go @@ -0,0 +1,136 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package azidentity + +import ( + "context" + "fmt" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" +) + +const credNameDeviceCode = "DeviceCodeCredential" + +// DeviceCodeCredentialOptions contains optional parameters for DeviceCodeCredential. +type DeviceCodeCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire + // tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant. + AdditionallyAllowedTenants []string + // ClientID is the ID of the application users will authenticate to. + // Defaults to the ID of an Azure development application. + ClientID string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool + // TenantID is the Azure Active Directory tenant the credential authenticates in. Defaults to the + // "organizations" tenant, which can authenticate work and school accounts. Required for single-tenant + // applications. + TenantID string + + // UserPrompt controls how the credential presents authentication instructions. The credential calls + // this function with authentication details when it receives a device code. By default, the credential + // prints these details to stdout. + UserPrompt func(context.Context, DeviceCodeMessage) error +} + +func (o *DeviceCodeCredentialOptions) init() { + if o.TenantID == "" { + o.TenantID = organizationsTenantID + } + if o.ClientID == "" { + o.ClientID = developerSignOnClientID + } + if o.UserPrompt == nil { + o.UserPrompt = func(ctx context.Context, dc DeviceCodeMessage) error { + fmt.Println(dc.Message) + return nil + } + } +} + +// DeviceCodeMessage contains the information a user needs to complete authentication. +type DeviceCodeMessage struct { + // UserCode is the user code returned by the service. + UserCode string `json:"user_code"` + // VerificationURL is the URL at which the user must authenticate. + VerificationURL string `json:"verification_uri"` + // Message is user instruction from Azure Active Directory. + Message string `json:"message"` +} + +// DeviceCodeCredential acquires tokens for a user via the device code flow, which has the +// user browse to an Azure Active Directory URL, enter a code, and authenticate. It's useful +// for authenticating a user in an environment without a web browser, such as an SSH session. +// If a web browser is available, InteractiveBrowserCredential is more convenient because it +// automatically opens a browser to the login page. +type DeviceCodeCredential struct { + account public.Account + client publicClient + s *syncer + prompt func(context.Context, DeviceCodeMessage) error +} + +// NewDeviceCodeCredential creates a DeviceCodeCredential. Pass nil to accept default options. 
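A sketch of overriding UserPrompt so the device code instructions go to a logger instead of stdout; the tenant and client IDs below are placeholders:

package example

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

// newLoggingDeviceCodeCredential routes device code sign-in instructions to the standard logger.
func newLoggingDeviceCodeCredential() (*azidentity.DeviceCodeCredential, error) {
	return azidentity.NewDeviceCodeCredential(&azidentity.DeviceCodeCredentialOptions{
		TenantID: "contoso-tenant-id", // placeholder
		ClientID: "app-client-id",     // placeholder
		UserPrompt: func(ctx context.Context, dc azidentity.DeviceCodeMessage) error {
			log.Println(dc.Message) // human-readable instructions containing the verification URL and user code
			return nil
		},
	})
}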
+func NewDeviceCodeCredential(options *DeviceCodeCredentialOptions) (*DeviceCodeCredential, error) { + cp := DeviceCodeCredentialOptions{} + if options != nil { + cp = *options + } + cp.init() + c, err := getPublicClient( + cp.ClientID, cp.TenantID, &cp.ClientOptions, public.WithInstanceDiscovery(!cp.DisableInstanceDiscovery), + ) + if err != nil { + return nil, err + } + cred := DeviceCodeCredential{client: c, prompt: cp.UserPrompt} + cred.s = newSyncer(credNameDeviceCode, cp.TenantID, cp.AdditionallyAllowedTenants, cred.requestToken, cred.silentAuth) + return &cred, nil +} + +// GetToken requests an access token from Azure Active Directory. It will begin the device code flow and poll until the user completes authentication. +// This method is called automatically by Azure SDK clients. +func (c *DeviceCodeCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return c.s.GetToken(ctx, opts) +} + +func (c *DeviceCodeCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + dc, err := c.client.AcquireTokenByDeviceCode(ctx, opts.Scopes, public.WithTenantID(opts.TenantID)) + if err != nil { + return azcore.AccessToken{}, err + } + err = c.prompt(ctx, DeviceCodeMessage{ + Message: dc.Result.Message, + UserCode: dc.Result.UserCode, + VerificationURL: dc.Result.VerificationURL, + }) + if err != nil { + return azcore.AccessToken{}, err + } + ar, err := dc.AuthenticationResult(ctx) + if err != nil { + return azcore.AccessToken{}, err + } + c.account = ar.Account + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +func (c *DeviceCodeCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, + public.WithSilentAccount(c.account), + public.WithTenantID(opts.TenantID), + ) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +var _ azcore.TokenCredential = (*DeviceCodeCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go new file mode 100644 index 00000000..7ecd928e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go @@ -0,0 +1,164 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + "fmt" + "os" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +const envVarSendCertChain = "AZURE_CLIENT_SEND_CERTIFICATE_CHAIN" + +// EnvironmentCredentialOptions contains optional parameters for EnvironmentCredential +type EnvironmentCredentialOptions struct { + azcore.ClientOptions + + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. 
+ DisableInstanceDiscovery bool + // additionallyAllowedTenants is used only by NewDefaultAzureCredential() to enable that constructor's explicit + // option to override the value of AZURE_ADDITIONALLY_ALLOWED_TENANTS. Applications using EnvironmentCredential + // directly should set that variable instead. This field should remain unexported to preserve this credential's + // unambiguous "all configuration from environment variables" design. + additionallyAllowedTenants []string +} + +// EnvironmentCredential authenticates a service principal with a secret or certificate, or a user with a password, depending +// on environment variable configuration. It reads configuration from these variables, in the following order: +// +// # Service principal with client secret +// +// AZURE_TENANT_ID: ID of the service principal's tenant. Also called its "directory" ID. +// +// AZURE_CLIENT_ID: the service principal's client ID +// +// AZURE_CLIENT_SECRET: one of the service principal's client secrets +// +// # Service principal with certificate +// +// AZURE_TENANT_ID: ID of the service principal's tenant. Also called its "directory" ID. +// +// AZURE_CLIENT_ID: the service principal's client ID +// +// AZURE_CLIENT_CERTIFICATE_PATH: path to a PEM or PKCS12 certificate file including the private key. +// +// AZURE_CLIENT_CERTIFICATE_PASSWORD: (optional) password for the certificate file. +// +// # User with username and password +// +// AZURE_TENANT_ID: (optional) tenant to authenticate in. Defaults to "organizations". +// +// AZURE_CLIENT_ID: client ID of the application the user will authenticate to +// +// AZURE_USERNAME: a username (usually an email address) +// +// AZURE_PASSWORD: the user's password +// +// # Configuration for multitenant applications +// +// To enable multitenant authentication, set AZURE_ADDITIONALLY_ALLOWED_TENANTS with a semicolon delimited list of tenants +// the credential may request tokens from in addition to the tenant specified by AZURE_TENANT_ID. Set +// AZURE_ADDITIONALLY_ALLOWED_TENANTS to "*" to enable the credential to request a token from any tenant. +type EnvironmentCredential struct { + cred azcore.TokenCredential +} + +// NewEnvironmentCredential creates an EnvironmentCredential. Pass nil to accept default options. 
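A sketch of the client-secret configuration path described above. The exported values are placeholders; in practice the deployment environment sets these variables rather than application code:

package example

import (
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

// newEnvCredential shows the variables EnvironmentCredential reads for a service principal with a client secret.
func newEnvCredential() (*azidentity.EnvironmentCredential, error) {
	os.Setenv("AZURE_TENANT_ID", "contoso-tenant-id")     // placeholder
	os.Setenv("AZURE_CLIENT_ID", "app-client-id")         // placeholder
	os.Setenv("AZURE_CLIENT_SECRET", "app-client-secret") // placeholder
	return azidentity.NewEnvironmentCredential(nil)
}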
+func NewEnvironmentCredential(options *EnvironmentCredentialOptions) (*EnvironmentCredential, error) { + if options == nil { + options = &EnvironmentCredentialOptions{} + } + tenantID := os.Getenv(azureTenantID) + if tenantID == "" { + return nil, errors.New("missing environment variable AZURE_TENANT_ID") + } + clientID := os.Getenv(azureClientID) + if clientID == "" { + return nil, errors.New("missing environment variable " + azureClientID) + } + // tenants set by NewDefaultAzureCredential() override the value of AZURE_ADDITIONALLY_ALLOWED_TENANTS + additionalTenants := options.additionallyAllowedTenants + if len(additionalTenants) == 0 { + if tenants := os.Getenv(azureAdditionallyAllowedTenants); tenants != "" { + additionalTenants = strings.Split(tenants, ";") + } + } + if clientSecret := os.Getenv(azureClientSecret); clientSecret != "" { + log.Write(EventAuthentication, "EnvironmentCredential will authenticate with ClientSecretCredential") + o := &ClientSecretCredentialOptions{ + AdditionallyAllowedTenants: additionalTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + } + cred, err := NewClientSecretCredential(tenantID, clientID, clientSecret, o) + if err != nil { + return nil, err + } + return &EnvironmentCredential{cred: cred}, nil + } + if certPath := os.Getenv(azureClientCertificatePath); certPath != "" { + log.Write(EventAuthentication, "EnvironmentCredential will authenticate with ClientCertificateCredential") + certData, err := os.ReadFile(certPath) + if err != nil { + return nil, fmt.Errorf(`failed to read certificate file "%s": %v`, certPath, err) + } + var password []byte + if v := os.Getenv(azureClientCertificatePassword); v != "" { + password = []byte(v) + } + certs, key, err := ParseCertificates(certData, password) + if err != nil { + return nil, fmt.Errorf(`failed to load certificate from "%s": %v`, certPath, err) + } + o := &ClientCertificateCredentialOptions{ + AdditionallyAllowedTenants: additionalTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + } + if v, ok := os.LookupEnv(envVarSendCertChain); ok { + o.SendCertificateChain = v == "1" || strings.ToLower(v) == "true" + } + cred, err := NewClientCertificateCredential(tenantID, clientID, certs, key, o) + if err != nil { + return nil, err + } + return &EnvironmentCredential{cred: cred}, nil + } + if username := os.Getenv(azureUsername); username != "" { + if password := os.Getenv(azurePassword); password != "" { + log.Write(EventAuthentication, "EnvironmentCredential will authenticate with UsernamePasswordCredential") + o := &UsernamePasswordCredentialOptions{ + AdditionallyAllowedTenants: additionalTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + } + cred, err := NewUsernamePasswordCredential(tenantID, clientID, username, password, o) + if err != nil { + return nil, err + } + return &EnvironmentCredential{cred: cred}, nil + } + return nil, errors.New("no value for AZURE_PASSWORD") + } + return nil, errors.New("incomplete environment variable configuration. Only AZURE_TENANT_ID and AZURE_CLIENT_ID are set") +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. 
+func (c *EnvironmentCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return c.cred.GetToken(ctx, opts) +} + +var _ azcore.TokenCredential = (*EnvironmentCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go new file mode 100644 index 00000000..86d8976a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go @@ -0,0 +1,129 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" + msal "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" +) + +// getResponseFromError retrieves the response carried by +// an AuthenticationFailedError or MSAL CallErr, if any +func getResponseFromError(err error) *http.Response { + var a *AuthenticationFailedError + var c msal.CallErr + var res *http.Response + if errors.As(err, &c) { + res = c.Resp + } else if errors.As(err, &a) { + res = a.RawResponse + } + return res +} + +// AuthenticationFailedError indicates an authentication request has failed. +type AuthenticationFailedError struct { + // RawResponse is the HTTP response motivating the error, if available. + RawResponse *http.Response + + credType string + message string + err error +} + +func newAuthenticationFailedError(credType string, message string, resp *http.Response, err error) error { + return &AuthenticationFailedError{credType: credType, message: message, RawResponse: resp, err: err} +} + +// Error implements the error interface. Note that the message contents are not contractual and can change over time. 
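A sketch of how a caller might inspect this error type after a failed token request; the status logging is purely illustrative:

package example

import (
	"errors"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

// logAuthFailure reports the HTTP status behind an authentication failure, when a response is available.
func logAuthFailure(err error) {
	var authErr *azidentity.AuthenticationFailedError
	if errors.As(err, &authErr) && authErr.RawResponse != nil {
		log.Printf("token request failed with HTTP %s", authErr.RawResponse.Status)
	}
}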
+func (e *AuthenticationFailedError) Error() string { + if e.RawResponse == nil { + return e.credType + ": " + e.message + } + msg := &bytes.Buffer{} + fmt.Fprintf(msg, e.credType+" authentication failed\n") + fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path) + fmt.Fprintln(msg, "--------------------------------------------------------------------------------") + fmt.Fprintf(msg, "RESPONSE %s\n", e.RawResponse.Status) + fmt.Fprintln(msg, "--------------------------------------------------------------------------------") + body, err := io.ReadAll(e.RawResponse.Body) + e.RawResponse.Body.Close() + if err != nil { + fmt.Fprintf(msg, "Error reading response body: %v", err) + } else if len(body) > 0 { + e.RawResponse.Body = io.NopCloser(bytes.NewReader(body)) + if err := json.Indent(msg, body, "", " "); err != nil { + // failed to pretty-print so just dump it verbatim + fmt.Fprint(msg, string(body)) + } + } else { + fmt.Fprint(msg, "Response contained no body") + } + fmt.Fprintln(msg, "\n--------------------------------------------------------------------------------") + var anchor string + switch e.credType { + case credNameAzureCLI: + anchor = "azure-cli" + case credNameCert: + anchor = "client-cert" + case credNameSecret: + anchor = "client-secret" + case credNameManagedIdentity: + anchor = "managed-id" + case credNameUserPassword: + anchor = "username-password" + case credNameWorkloadIdentity: + anchor = "workload" + } + if anchor != "" { + fmt.Fprintf(msg, "To troubleshoot, visit https://aka.ms/azsdk/go/identity/troubleshoot#%s", anchor) + } + return msg.String() +} + +// NonRetriable indicates the request which provoked this error shouldn't be retried. +func (*AuthenticationFailedError) NonRetriable() { + // marker method +} + +var _ errorinfo.NonRetriable = (*AuthenticationFailedError)(nil) + +// credentialUnavailableError indicates a credential can't attempt authentication because it lacks required +// data or state +type credentialUnavailableError struct { + message string +} + +// newCredentialUnavailableError is an internal helper that ensures consistent error message formatting +func newCredentialUnavailableError(credType, message string) error { + msg := fmt.Sprintf("%s: %s", credType, message) + return &credentialUnavailableError{msg} +} + +// NewCredentialUnavailableError constructs an error indicating a credential can't attempt authentication +// because it lacks required data or state. When [ChainedTokenCredential] receives this error it will try +// its next credential, if any. +func NewCredentialUnavailableError(message string) error { + return &credentialUnavailableError{message} +} + +// Error implements the error interface. Note that the message contents are not contractual and can change over time. +func (e *credentialUnavailableError) Error() string { + return e.message +} + +// NonRetriable is a marker method indicating this error should not be retried. It has no implementation. 
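A sketch of the pattern NewCredentialUnavailableError enables: a custom credential that tells ChainedTokenCredential to move on when its configuration is absent. The environment variable name and the one-hour lifetime are placeholders:

package example

import (
	"context"
	"os"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

// staticTokenCredential returns a token from an environment variable, or defers to the
// next credential in a chain when that variable isn't set.
type staticTokenCredential struct{}

func (staticTokenCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
	tk := os.Getenv("EXAMPLE_STATIC_TOKEN") // placeholder variable name
	if tk == "" {
		return azcore.AccessToken{}, azidentity.NewCredentialUnavailableError("staticTokenCredential: EXAMPLE_STATIC_TOKEN isn't set")
	}
	return azcore.AccessToken{Token: tk, ExpiresOn: time.Now().Add(time.Hour)}, nil // assumed lifetime
}

// newChain tries the static token first, then falls back to the Azure CLI.
func newChain() (*azidentity.ChainedTokenCredential, error) {
	cli, err := azidentity.NewAzureCLICredential(nil)
	if err != nil {
		return nil, err
	}
	return azidentity.NewChainedTokenCredential([]azcore.TokenCredential{staticTokenCredential{}, cli}, nil)
}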
+func (e *credentialUnavailableError) NonRetriable() {} + +var _ errorinfo.NonRetriable = (*credentialUnavailableError)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go new file mode 100644 index 00000000..4868d22c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go @@ -0,0 +1,106 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" +) + +const credNameBrowser = "InteractiveBrowserCredential" + +// InteractiveBrowserCredentialOptions contains optional parameters for InteractiveBrowserCredential. +type InteractiveBrowserCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire + // tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant. + AdditionallyAllowedTenants []string + // ClientID is the ID of the application users will authenticate to. + // Defaults to the ID of an Azure development application. + ClientID string + + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool + + // LoginHint pre-populates the account prompt with a username. Users may choose to authenticate a different account. + LoginHint string + // RedirectURL is the URL Azure Active Directory will redirect to with the access token. This is required + // only when setting ClientID, and must match a redirect URI in the application's registration. + // Applications which have registered "http://localhost" as a redirect URI need not set this option. + RedirectURL string + + // TenantID is the Azure Active Directory tenant the credential authenticates in. Defaults to the + // "organizations" tenant, which can authenticate work and school accounts. + TenantID string +} + +func (o *InteractiveBrowserCredentialOptions) init() { + if o.TenantID == "" { + o.TenantID = organizationsTenantID + } + if o.ClientID == "" { + o.ClientID = developerSignOnClientID + } +} + +// InteractiveBrowserCredential opens a browser to interactively authenticate a user. +type InteractiveBrowserCredential struct { + account public.Account + client publicClient + options InteractiveBrowserCredentialOptions + s *syncer +} + +// NewInteractiveBrowserCredential constructs a new InteractiveBrowserCredential. Pass nil to accept default options. 
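A sketch of constructing the credential for a registered application; the IDs and redirect URL are placeholders and must match the application's registration:

package example

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

// newBrowserCredential opens the system browser for sign-in when a token is first requested.
func newBrowserCredential() (*azidentity.InteractiveBrowserCredential, error) {
	return azidentity.NewInteractiveBrowserCredential(&azidentity.InteractiveBrowserCredentialOptions{
		TenantID:    "contoso-tenant-id",     // placeholder
		ClientID:    "app-client-id",         // placeholder
		RedirectURL: "http://localhost:8080", // placeholder; must match a redirect URI on the app registration
	})
}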
+func NewInteractiveBrowserCredential(options *InteractiveBrowserCredentialOptions) (*InteractiveBrowserCredential, error) { + cp := InteractiveBrowserCredentialOptions{} + if options != nil { + cp = *options + } + cp.init() + c, err := getPublicClient(cp.ClientID, cp.TenantID, &cp.ClientOptions, public.WithInstanceDiscovery(!cp.DisableInstanceDiscovery)) + if err != nil { + return nil, err + } + ibc := InteractiveBrowserCredential{client: c, options: cp} + ibc.s = newSyncer(credNameBrowser, cp.TenantID, cp.AdditionallyAllowedTenants, ibc.requestToken, ibc.silentAuth) + return &ibc, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +func (c *InteractiveBrowserCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return c.s.GetToken(ctx, opts) +} + +func (c *InteractiveBrowserCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenInteractive(ctx, opts.Scopes, + public.WithLoginHint(c.options.LoginHint), + public.WithRedirectURI(c.options.RedirectURL), + public.WithTenantID(opts.TenantID), + ) + if err == nil { + c.account = ar.Account + } + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +func (c *InteractiveBrowserCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, + public.WithSilentAccount(c.account), + public.WithTenantID(opts.TenantID), + ) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +var _ azcore.TokenCredential = (*InteractiveBrowserCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/logging.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/logging.go new file mode 100644 index 00000000..1aa1e0fc --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/logging.go @@ -0,0 +1,14 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + +// EventAuthentication entries contain information about authentication. +// This includes information like the names of environment variables +// used when obtaining credentials and the type of credential used. +const EventAuthentication log.Event = "Authentication" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go new file mode 100644 index 00000000..d7b4a32a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go @@ -0,0 +1,388 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package azidentity + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" +) + +const ( + arcIMDSEndpoint = "IMDS_ENDPOINT" + identityEndpoint = "IDENTITY_ENDPOINT" + identityHeader = "IDENTITY_HEADER" + identityServerThumbprint = "IDENTITY_SERVER_THUMBPRINT" + headerMetadata = "Metadata" + imdsEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token" + msiEndpoint = "MSI_ENDPOINT" + imdsAPIVersion = "2018-02-01" + azureArcAPIVersion = "2019-08-15" + serviceFabricAPIVersion = "2019-07-01-preview" + + qpClientID = "client_id" + qpResID = "mi_res_id" +) + +type msiType int + +const ( + msiTypeAppService msiType = iota + msiTypeAzureArc + msiTypeCloudShell + msiTypeIMDS + msiTypeServiceFabric +) + +// managedIdentityClient provides the base for authenticating in managed identity environments +// This type includes an runtime.Pipeline and TokenCredentialOptions. +type managedIdentityClient struct { + pipeline runtime.Pipeline + msiType msiType + endpoint string + id ManagedIDKind +} + +type wrappedNumber json.Number + +func (n *wrappedNumber) UnmarshalJSON(b []byte) error { + c := string(b) + if c == "\"\"" { + return nil + } + return json.Unmarshal(b, (*json.Number)(n)) +} + +// setIMDSRetryOptionDefaults sets zero-valued fields to default values appropriate for IMDS +func setIMDSRetryOptionDefaults(o *policy.RetryOptions) { + if o.MaxRetries == 0 { + o.MaxRetries = 5 + } + if o.MaxRetryDelay == 0 { + o.MaxRetryDelay = 1 * time.Minute + } + if o.RetryDelay == 0 { + o.RetryDelay = 2 * time.Second + } + if o.StatusCodes == nil { + o.StatusCodes = []int{ + // IMDS docs recommend retrying 404, 429 and all 5xx + // https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token#error-handling + http.StatusNotFound, // 404 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusNotImplemented, // 501 + http.StatusBadGateway, // 502 + http.StatusGatewayTimeout, // 504 + http.StatusHTTPVersionNotSupported, // 505 + http.StatusVariantAlsoNegotiates, // 506 + http.StatusInsufficientStorage, // 507 + http.StatusLoopDetected, // 508 + http.StatusNotExtended, // 510 + http.StatusNetworkAuthenticationRequired, // 511 + } + } + if o.TryTimeout == 0 { + o.TryTimeout = 1 * time.Minute + } +} + +// newManagedIdentityClient creates a new instance of the ManagedIdentityClient with the ManagedIdentityCredentialOptions +// that are passed into it along with a default pipeline. 
+// options: ManagedIdentityCredentialOptions configure policies for the pipeline and the authority host that +// will be used to retrieve tokens and authenticate +func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*managedIdentityClient, error) { + if options == nil { + options = &ManagedIdentityCredentialOptions{} + } + cp := options.ClientOptions + c := managedIdentityClient{id: options.ID, endpoint: imdsEndpoint, msiType: msiTypeIMDS} + env := "IMDS" + if endpoint, ok := os.LookupEnv(identityEndpoint); ok { + if _, ok := os.LookupEnv(identityHeader); ok { + if _, ok := os.LookupEnv(identityServerThumbprint); ok { + env = "Service Fabric" + c.endpoint = endpoint + c.msiType = msiTypeServiceFabric + } else { + env = "App Service" + c.endpoint = endpoint + c.msiType = msiTypeAppService + } + } else if _, ok := os.LookupEnv(arcIMDSEndpoint); ok { + env = "Azure Arc" + c.endpoint = endpoint + c.msiType = msiTypeAzureArc + } + } else if endpoint, ok := os.LookupEnv(msiEndpoint); ok { + env = "Cloud Shell" + c.endpoint = endpoint + c.msiType = msiTypeCloudShell + } else { + setIMDSRetryOptionDefaults(&cp.Retry) + } + c.pipeline = runtime.NewPipeline(component, version, runtime.PipelineOptions{}, &cp) + + if log.Should(EventAuthentication) { + log.Writef(EventAuthentication, "Managed Identity Credential will use %s managed identity", env) + } + + return &c, nil +} + +// provideToken acquires a token for MSAL's confidential.Client, which caches the token +func (c *managedIdentityClient) provideToken(ctx context.Context, params confidential.TokenProviderParameters) (confidential.TokenProviderResult, error) { + result := confidential.TokenProviderResult{} + tk, err := c.authenticate(ctx, c.id, params.Scopes) + if err == nil { + result.AccessToken = tk.Token + result.ExpiresInSeconds = int(time.Until(tk.ExpiresOn).Seconds()) + } + return result, err +} + +// authenticate acquires an access token +func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKind, scopes []string) (azcore.AccessToken, error) { + msg, err := c.createAuthRequest(ctx, id, scopes) + if err != nil { + return azcore.AccessToken{}, err + } + + resp, err := c.pipeline.Do(msg) + if err != nil { + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, err.Error(), nil, err) + } + + if runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return c.createAccessToken(resp) + } + + if c.msiType == msiTypeIMDS && resp.StatusCode == 400 { + if id != nil { + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "the requested identity isn't assigned to this resource", resp, nil) + } + return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, "no default identity is assigned to this resource") + } + + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "authentication failed", resp, nil) +} + +func (c *managedIdentityClient) createAccessToken(res *http.Response) (azcore.AccessToken, error) { + value := struct { + // these are the only fields that we use + Token string `json:"access_token,omitempty"` + RefreshToken string `json:"refresh_token,omitempty"` + ExpiresIn wrappedNumber `json:"expires_in,omitempty"` // this field should always return the number of seconds for which a token is valid + ExpiresOn interface{} `json:"expires_on,omitempty"` // the value returned in this field varies between a number and a date string + }{} + if err := runtime.UnmarshalAsJSON(res, 
&value); err != nil { + return azcore.AccessToken{}, fmt.Errorf("internal AccessToken: %v", err) + } + if value.ExpiresIn != "" { + expiresIn, err := json.Number(value.ExpiresIn).Int64() + if err != nil { + return azcore.AccessToken{}, err + } + return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Now().Add(time.Second * time.Duration(expiresIn)).UTC()}, nil + } + switch v := value.ExpiresOn.(type) { + case float64: + return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Unix(int64(v), 0).UTC()}, nil + case string: + if expiresOn, err := strconv.Atoi(v); err == nil { + return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Unix(int64(expiresOn), 0).UTC()}, nil + } + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "unexpected expires_on value: "+v, res, nil) + default: + msg := fmt.Sprintf("unsupported type received in expires_on: %T, %v", v, v) + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, msg, res, nil) + } +} + +func (c *managedIdentityClient) createAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { + switch c.msiType { + case msiTypeIMDS: + return c.createIMDSAuthRequest(ctx, id, scopes) + case msiTypeAppService: + return c.createAppServiceAuthRequest(ctx, id, scopes) + case msiTypeAzureArc: + // need to perform preliminary request to retreive the secret key challenge provided by the HIMDS service + key, err := c.getAzureArcSecretKey(ctx, scopes) + if err != nil { + msg := fmt.Sprintf("failed to retreive secret key from the identity endpoint: %v", err) + return nil, newAuthenticationFailedError(credNameManagedIdentity, msg, nil, err) + } + return c.createAzureArcAuthRequest(ctx, id, scopes, key) + case msiTypeServiceFabric: + return c.createServiceFabricAuthRequest(ctx, id, scopes) + case msiTypeCloudShell: + return c.createCloudShellAuthRequest(ctx, id, scopes) + default: + return nil, newCredentialUnavailableError(credNameManagedIdentity, "managed identity isn't supported in this environment") + } +} + +func (c *managedIdentityClient) createIMDSAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { + request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + if err != nil { + return nil, err + } + request.Raw().Header.Set(headerMetadata, "true") + q := request.Raw().URL.Query() + q.Add("api-version", imdsAPIVersion) + q.Add("resource", strings.Join(scopes, " ")) + if id != nil { + if id.idKind() == miResourceID { + q.Add(qpResID, id.String()) + } else { + q.Add(qpClientID, id.String()) + } + } + request.Raw().URL.RawQuery = q.Encode() + return request, nil +} + +func (c *managedIdentityClient) createAppServiceAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { + request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + if err != nil { + return nil, err + } + request.Raw().Header.Set("X-IDENTITY-HEADER", os.Getenv(identityHeader)) + q := request.Raw().URL.Query() + q.Add("api-version", "2019-08-01") + q.Add("resource", scopes[0]) + if id != nil { + if id.idKind() == miResourceID { + q.Add(qpResID, id.String()) + } else { + q.Add(qpClientID, id.String()) + } + } + request.Raw().URL.RawQuery = q.Encode() + return request, nil +} + +func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { + request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) 
+ if err != nil { + return nil, err + } + q := request.Raw().URL.Query() + request.Raw().Header.Set("Accept", "application/json") + request.Raw().Header.Set("Secret", os.Getenv(identityHeader)) + q.Add("api-version", serviceFabricAPIVersion) + q.Add("resource", strings.Join(scopes, " ")) + if id != nil { + log.Write(EventAuthentication, "WARNING: Service Fabric doesn't support selecting a user-assigned identity at runtime") + if id.idKind() == miResourceID { + q.Add(qpResID, id.String()) + } else { + q.Add(qpClientID, id.String()) + } + } + request.Raw().URL.RawQuery = q.Encode() + return request, nil +} + +func (c *managedIdentityClient) getAzureArcSecretKey(ctx context.Context, resources []string) (string, error) { + // create the request to retreive the secret key challenge provided by the HIMDS service + request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + if err != nil { + return "", err + } + request.Raw().Header.Set(headerMetadata, "true") + q := request.Raw().URL.Query() + q.Add("api-version", azureArcAPIVersion) + q.Add("resource", strings.Join(resources, " ")) + request.Raw().URL.RawQuery = q.Encode() + // send the initial request to get the short-lived secret key + response, err := c.pipeline.Do(request) + if err != nil { + return "", err + } + // the endpoint is expected to return a 401 with the WWW-Authenticate header set to the location + // of the secret key file. Any other status code indicates an error in the request. + if response.StatusCode != 401 { + msg := fmt.Sprintf("expected a 401 response, received %d", response.StatusCode) + return "", newAuthenticationFailedError(credNameManagedIdentity, msg, response, nil) + } + header := response.Header.Get("WWW-Authenticate") + if len(header) == 0 { + return "", errors.New("did not receive a value from WWW-Authenticate header") + } + // the WWW-Authenticate header is expected in the following format: Basic realm=/some/file/path.key + pos := strings.LastIndex(header, "=") + if pos == -1 { + return "", fmt.Errorf("did not receive a correct value from WWW-Authenticate header: %s", header) + } + key, err := os.ReadFile(header[pos+1:]) + if err != nil { + return "", fmt.Errorf("could not read file (%s) contents: %v", header[pos+1:], err) + } + return string(key), nil +} + +func (c *managedIdentityClient) createAzureArcAuthRequest(ctx context.Context, id ManagedIDKind, resources []string, key string) (*policy.Request, error) { + request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + if err != nil { + return nil, err + } + request.Raw().Header.Set(headerMetadata, "true") + request.Raw().Header.Set("Authorization", fmt.Sprintf("Basic %s", key)) + q := request.Raw().URL.Query() + q.Add("api-version", azureArcAPIVersion) + q.Add("resource", strings.Join(resources, " ")) + if id != nil { + log.Write(EventAuthentication, "WARNING: Azure Arc doesn't support user-assigned managed identities") + if id.idKind() == miResourceID { + q.Add(qpResID, id.String()) + } else { + q.Add(qpClientID, id.String()) + } + } + request.Raw().URL.RawQuery = q.Encode() + return request, nil +} + +func (c *managedIdentityClient) createCloudShellAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { + request, err := runtime.NewRequest(ctx, http.MethodPost, c.endpoint) + if err != nil { + return nil, err + } + request.Raw().Header.Set(headerMetadata, "true") + data := url.Values{} + data.Set("resource", strings.Join(scopes, " ")) + dataEncoded := data.Encode() + body := 
streaming.NopCloser(strings.NewReader(dataEncoded)) + if err := request.SetBody(body, "application/x-www-form-urlencoded"); err != nil { + return nil, err + } + if id != nil { + log.Write(EventAuthentication, "WARNING: Cloud Shell doesn't support user-assigned managed identities") + q := request.Raw().URL.Query() + if id.idKind() == miResourceID { + q.Add(qpResID, id.String()) + } else { + q.Add(qpClientID, id.String()) + } + } + return request, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go new file mode 100644 index 00000000..c6710ae5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go @@ -0,0 +1,127 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" +) + +const credNameManagedIdentity = "ManagedIdentityCredential" + +type managedIdentityIDKind int + +const ( + miClientID managedIdentityIDKind = 0 + miResourceID managedIdentityIDKind = 1 +) + +// ManagedIDKind identifies the ID of a managed identity as either a client or resource ID +type ManagedIDKind interface { + fmt.Stringer + idKind() managedIdentityIDKind +} + +// ClientID is the client ID of a user-assigned managed identity. +type ClientID string + +func (ClientID) idKind() managedIdentityIDKind { + return miClientID +} + +// String returns the string value of the ID. +func (c ClientID) String() string { + return string(c) +} + +// ResourceID is the resource ID of a user-assigned managed identity. +type ResourceID string + +func (ResourceID) idKind() managedIdentityIDKind { + return miResourceID +} + +// String returns the string value of the ID. +func (r ResourceID) String() string { + return string(r) +} + +// ManagedIdentityCredentialOptions contains optional parameters for ManagedIdentityCredential. +type ManagedIdentityCredentialOptions struct { + azcore.ClientOptions + + // ID is the ID of a managed identity the credential should authenticate. Set this field to use a specific identity + // instead of the hosting environment's default. The value may be the identity's client ID or resource ID, but note that + // some platforms don't accept resource IDs. + ID ManagedIDKind +} + +// ManagedIdentityCredential authenticates an Azure managed identity in any hosting environment supporting managed identities. +// This credential authenticates a system-assigned identity by default. Use ManagedIdentityCredentialOptions.ID to specify a +// user-assigned identity. See Azure Active Directory documentation for more information about managed identities: +// https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview +type ManagedIdentityCredential struct { + client confidentialClient + mic *managedIdentityClient + s *syncer +} + +// NewManagedIdentityCredential creates a ManagedIdentityCredential. Pass nil to accept default options. 
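A sketch of selecting a user-assigned identity by client ID (the ID is a placeholder); passing nil options instead authenticates the system-assigned identity:

package example

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

// newUserAssignedMICredential authenticates a specific user-assigned managed identity.
func newUserAssignedMICredential() (*azidentity.ManagedIdentityCredential, error) {
	return azidentity.NewManagedIdentityCredential(&azidentity.ManagedIdentityCredentialOptions{
		ID: azidentity.ClientID("user-assigned-client-id"), // placeholder; ResourceID(...) is an alternative where supported
	})
}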
+func NewManagedIdentityCredential(options *ManagedIdentityCredentialOptions) (*ManagedIdentityCredential, error) { + if options == nil { + options = &ManagedIdentityCredentialOptions{} + } + mic, err := newManagedIdentityClient(options) + if err != nil { + return nil, err + } + cred := confidential.NewCredFromTokenProvider(mic.provideToken) + + // It's okay to give MSAL an invalid client ID because MSAL will use it only as part of a cache key. + // ManagedIdentityClient handles all the details of authentication and won't receive this value from MSAL. + clientID := "SYSTEM-ASSIGNED-MANAGED-IDENTITY" + if options.ID != nil { + clientID = options.ID.String() + } + // similarly, it's okay to give MSAL an incorrect authority URL because that URL won't be used + c, err := confidential.New("https://login.microsoftonline.com/common", clientID, cred) + if err != nil { + return nil, err + } + m := ManagedIdentityCredential{client: c, mic: mic} + m.s = newSyncer(credNameManagedIdentity, "", nil, m.requestToken, m.silentAuth) + return &m, nil +} + +// GetToken requests an access token from the hosting environment. This method is called automatically by Azure SDK clients. +func (c *ManagedIdentityCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if len(opts.Scopes) != 1 { + err := errors.New(credNameManagedIdentity + ": GetToken() requires exactly one scope") + return azcore.AccessToken{}, err + } + // managed identity endpoints require an AADv1 resource (i.e. token audience), not a v2 scope, so we remove "/.default" here + opts.Scopes = []string{strings.TrimSuffix(opts.Scopes[0], defaultSuffix)} + return c.s.GetToken(ctx, opts) +} + +func (c *ManagedIdentityCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenByCredential(ctx, opts.Scopes) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +func (c *ManagedIdentityCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +var _ azcore.TokenCredential = (*ManagedIdentityCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go new file mode 100644 index 00000000..3e173f47 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go @@ -0,0 +1,99 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "crypto" + "crypto/x509" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" +) + +const credNameOBO = "OnBehalfOfCredential" + +// OnBehalfOfCredential authenticates a service principal via the on-behalf-of flow. This is typically used by +// middle-tier services that authorize requests to other services with a delegated user identity. Because this +// is not an interactive authentication flow, an application using it must have admin consent for any delegated +// permissions before requesting tokens for them. 
See [Azure Active Directory documentation] for more details. +// +// [Azure Active Directory documentation]: https://docs.microsoft.com/azure/active-directory/develop/v2-oauth2-on-behalf-of-flow +type OnBehalfOfCredential struct { + assertion string + client confidentialClient + s *syncer +} + +// OnBehalfOfCredentialOptions contains optional parameters for OnBehalfOfCredential +type OnBehalfOfCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. + // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the + // application is registered. + AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool + // SendCertificateChain applies only when the credential is configured to authenticate with a certificate. + // This setting controls whether the credential sends the public certificate chain in the x5c header of each + // token request's JWT. This is required for, and only used in, Subject Name/Issuer (SNI) authentication. + SendCertificateChain bool +} + +// NewOnBehalfOfCredentialWithCertificate constructs an OnBehalfOfCredential that authenticates with a certificate. +// See [ParseCertificates] for help loading a certificate. +func NewOnBehalfOfCredentialWithCertificate(tenantID, clientID, userAssertion string, certs []*x509.Certificate, key crypto.PrivateKey, options *OnBehalfOfCredentialOptions) (*OnBehalfOfCredential, error) { + cred, err := confidential.NewCredFromCert(certs, key) + if err != nil { + return nil, err + } + return newOnBehalfOfCredential(tenantID, clientID, userAssertion, cred, options) +} + +// NewOnBehalfOfCredentialWithSecret constructs an OnBehalfOfCredential that authenticates with a client secret. +func NewOnBehalfOfCredentialWithSecret(tenantID, clientID, userAssertion, clientSecret string, options *OnBehalfOfCredentialOptions) (*OnBehalfOfCredential, error) { + cred, err := confidential.NewCredFromSecret(clientSecret) + if err != nil { + return nil, err + } + return newOnBehalfOfCredential(tenantID, clientID, userAssertion, cred, options) +} + +func newOnBehalfOfCredential(tenantID, clientID, userAssertion string, cred confidential.Credential, options *OnBehalfOfCredentialOptions) (*OnBehalfOfCredential, error) { + if options == nil { + options = &OnBehalfOfCredentialOptions{} + } + opts := []confidential.Option{} + if options.SendCertificateChain { + opts = append(opts, confidential.WithX5C()) + } + opts = append(opts, confidential.WithInstanceDiscovery(!options.DisableInstanceDiscovery)) + c, err := getConfidentialClient(clientID, tenantID, cred, &options.ClientOptions, opts...) + if err != nil { + return nil, err + } + obo := OnBehalfOfCredential{assertion: userAssertion, client: c} + obo.s = newSyncer(credNameOBO, tenantID, options.AdditionallyAllowedTenants, obo.requestToken, obo.requestToken) + return &obo, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. 
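A sketch of the on-behalf-of exchange: the middle-tier service forwards the user's incoming access token as the assertion. The IDs, secret, and downstream scope are placeholders:

package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

// exchangeOnBehalfOf trades a user's token for one this service can use against a downstream API.
func exchangeOnBehalfOf(ctx context.Context, userAccessToken string) (string, error) {
	cred, err := azidentity.NewOnBehalfOfCredentialWithSecret(
		"contoso-tenant-id", // placeholder
		"api-client-id",     // placeholder
		userAccessToken,     // the assertion: the token the user presented to this service
		"api-client-secret", // placeholder
		nil,
	)
	if err != nil {
		return "", err
	}
	tk, err := cred.GetToken(ctx, policy.TokenRequestOptions{
		Scopes: []string{"https://graph.microsoft.com/.default"}, // placeholder downstream scope
	})
	if err != nil {
		return "", err
	}
	return tk.Token, nil
}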
+func (o *OnBehalfOfCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return o.s.GetToken(ctx, opts) +} + +func (o *OnBehalfOfCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := o.client.AcquireTokenOnBehalfOf(ctx, o.assertion, opts.Scopes, confidential.WithTenantID(opts.TenantID)) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +var _ azcore.TokenCredential = (*OnBehalfOfCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/syncer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/syncer.go new file mode 100644 index 00000000..ae385559 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/syncer.go @@ -0,0 +1,130 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +type authFn func(context.Context, policy.TokenRequestOptions) (azcore.AccessToken, error) + +// syncer synchronizes authentication calls so that goroutines can share a credential instance +type syncer struct { + addlTenants []string + authing bool + cond *sync.Cond + reqToken, silent authFn + name, tenant string +} + +func newSyncer(name, tenant string, additionalTenants []string, reqToken, silentAuth authFn) *syncer { + return &syncer{ + addlTenants: resolveAdditionalTenants(additionalTenants), + cond: &sync.Cond{L: &sync.Mutex{}}, + name: name, + reqToken: reqToken, + silent: silentAuth, + tenant: tenant, + } +} + +// GetToken ensures that only one goroutine authenticates at a time +func (s *syncer) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + var at azcore.AccessToken + var err error + if len(opts.Scopes) == 0 { + return at, errors.New(s.name + ".GetToken() requires at least one scope") + } + // we don't resolve the tenant for managed identities because they can acquire tokens only from their home tenants + if s.name != credNameManagedIdentity { + tenant, err := s.resolveTenant(opts.TenantID) + if err != nil { + return at, err + } + opts.TenantID = tenant + } + auth := false + s.cond.L.Lock() + defer s.cond.L.Unlock() + for { + at, err = s.silent(ctx, opts) + if err == nil { + // got a token + break + } + if !s.authing { + // this goroutine will request a token + s.authing, auth = true, true + break + } + // another goroutine is acquiring a token; wait for it to finish, then try silent auth again + s.cond.Wait() + } + if auth { + s.authing = false + at, err = s.reqToken(ctx, opts) + s.cond.Broadcast() + } + if err != nil { + // Return credentialUnavailableError directly because that type affects the behavior of credential chains. + // Otherwise, return AuthenticationFailedError. 
+ var unavailableErr *credentialUnavailableError + if !errors.As(err, &unavailableErr) { + res := getResponseFromError(err) + err = newAuthenticationFailedError(s.name, err.Error(), res, err) + } + } else if log.Should(EventAuthentication) { + scope := strings.Join(opts.Scopes, ", ") + msg := fmt.Sprintf(`%s.GetToken() acquired a token for scope "%s"\n`, s.name, scope) + log.Write(EventAuthentication, msg) + } + return at, err +} + +// resolveTenant returns the correct tenant for a token request given the credential's +// configuration, or an error when the specified tenant isn't allowed by that configuration +func (s *syncer) resolveTenant(requested string) (string, error) { + if requested == "" || requested == s.tenant { + return s.tenant, nil + } + if s.tenant == "adfs" { + return "", errors.New("ADFS doesn't support tenants") + } + if !validTenantID(requested) { + return "", errors.New(tenantIDValidationErr) + } + for _, t := range s.addlTenants { + if t == "*" || t == requested { + return requested, nil + } + } + return "", fmt.Errorf(`%s isn't configured to acquire tokens for tenant %q. To enable acquiring tokens for this tenant add it to the AdditionallyAllowedTenants on the credential options, or add "*" to allow acquiring tokens for any tenant`, s.name, requested) +} + +// resolveAdditionalTenants returns a copy of tenants, simplified when tenants contains a wildcard +func resolveAdditionalTenants(tenants []string) []string { + if len(tenants) == 0 { + return nil + } + for _, t := range tenants { + // a wildcard makes all other values redundant + if t == "*" { + return []string{"*"} + } + } + cp := make([]string, len(tenants)) + copy(cp, tenants) + return cp +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go new file mode 100644 index 00000000..8e652e33 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go @@ -0,0 +1,81 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" +) + +const credNameUserPassword = "UsernamePasswordCredential" + +// UsernamePasswordCredentialOptions contains optional parameters for UsernamePasswordCredential. +type UsernamePasswordCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. + // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the + // application is registered. + AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool +} + +// UsernamePasswordCredential authenticates a user with a password. 
Microsoft doesn't recommend this kind of authentication, +// because it's less secure than other authentication flows. This credential is not interactive, so it isn't compatible +// with any form of multi-factor authentication, and the application must already have user or admin consent. +// This credential can only authenticate work and school accounts; it can't authenticate Microsoft accounts. +type UsernamePasswordCredential struct { + account public.Account + client publicClient + password, username string + s *syncer +} + +// NewUsernamePasswordCredential creates a UsernamePasswordCredential. clientID is the ID of the application the user +// will authenticate to. Pass nil for options to accept defaults. +func NewUsernamePasswordCredential(tenantID string, clientID string, username string, password string, options *UsernamePasswordCredentialOptions) (*UsernamePasswordCredential, error) { + if options == nil { + options = &UsernamePasswordCredentialOptions{} + } + c, err := getPublicClient(clientID, tenantID, &options.ClientOptions, public.WithInstanceDiscovery(!options.DisableInstanceDiscovery)) + if err != nil { + return nil, err + } + upc := UsernamePasswordCredential{client: c, password: password, username: username} + upc.s = newSyncer(credNameUserPassword, tenantID, options.AdditionallyAllowedTenants, upc.requestToken, upc.silentAuth) + return &upc, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +func (c *UsernamePasswordCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return c.s.GetToken(ctx, opts) +} + +func (c *UsernamePasswordCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenByUsernamePassword(ctx, opts.Scopes, c.username, c.password, public.WithTenantID(opts.TenantID)) + if err == nil { + c.account = ar.Account + } + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +func (c *UsernamePasswordCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, + public.WithSilentAccount(c.account), + public.WithTenantID(opts.TenantID), + ) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +var _ azcore.TokenCredential = (*UsernamePasswordCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go new file mode 100644 index 00000000..1a526b2e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go @@ -0,0 +1,15 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +const ( + // UserAgent is the string to be used in the user agent string when making requests. + component = "azidentity" + + // Version is the semantic version (see http://semver.org) of this module. 
+ version = "v1.3.0" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go new file mode 100644 index 00000000..7bfb3436 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go @@ -0,0 +1,126 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + "os" + "sync" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +const credNameWorkloadIdentity = "WorkloadIdentityCredential" + +// WorkloadIdentityCredential supports Azure workload identity on Kubernetes. +// See [Azure Kubernetes Service documentation] for more information. +// +// [Azure Kubernetes Service documentation]: https://learn.microsoft.com/azure/aks/workload-identity-overview +type WorkloadIdentityCredential struct { + assertion, file string + cred *ClientAssertionCredential + expires time.Time + mtx *sync.RWMutex +} + +// WorkloadIdentityCredentialOptions contains optional parameters for WorkloadIdentityCredential. +type WorkloadIdentityCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. + // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the + // application is registered. + AdditionallyAllowedTenants []string + // ClientID of the service principal. Defaults to the value of the environment variable AZURE_CLIENT_ID. + ClientID string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool + // TenantID of the service principal. Defaults to the value of the environment variable AZURE_TENANT_ID. + TenantID string + // TokenFilePath is the path a file containing the workload identity token. Defaults to the value of the + // environment variable AZURE_FEDERATED_TOKEN_FILE. + TokenFilePath string +} + +// NewWorkloadIdentityCredential constructs a WorkloadIdentityCredential. Service principal configuration is read +// from environment variables as set by the Azure workload identity webhook. Set options to override those values. +func NewWorkloadIdentityCredential(options *WorkloadIdentityCredentialOptions) (*WorkloadIdentityCredential, error) { + if options == nil { + options = &WorkloadIdentityCredentialOptions{} + } + ok := false + clientID := options.ClientID + if clientID == "" { + if clientID, ok = os.LookupEnv(azureClientID); !ok { + return nil, errors.New("no client ID specified. Check pod configuration or set ClientID in the options") + } + } + file := options.TokenFilePath + if file == "" { + if file, ok = os.LookupEnv(azureFederatedTokenFile); !ok { + return nil, errors.New("no token file specified. 
Check pod configuration or set TokenFilePath in the options") + } + } + tenantID := options.TenantID + if tenantID == "" { + if tenantID, ok = os.LookupEnv(azureTenantID); !ok { + return nil, errors.New("no tenant ID specified. Check pod configuration or set TenantID in the options") + } + } + w := WorkloadIdentityCredential{file: file, mtx: &sync.RWMutex{}} + caco := ClientAssertionCredentialOptions{ + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + } + cred, err := NewClientAssertionCredential(tenantID, clientID, w.getAssertion, &caco) + if err != nil { + return nil, err + } + // we want "WorkloadIdentityCredential" in log messages, not "ClientAssertionCredential" + cred.s.name = credNameWorkloadIdentity + w.cred = cred + return &w, nil +} + +// GetToken requests an access token from Azure Active Directory. Azure SDK clients call this method automatically. +func (w *WorkloadIdentityCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return w.cred.GetToken(ctx, opts) +} + +// getAssertion returns the specified file's content, which is expected to be a Kubernetes service account token. +// Kubernetes is responsible for updating the file as service account tokens expire. +func (w *WorkloadIdentityCredential) getAssertion(context.Context) (string, error) { + w.mtx.RLock() + if w.expires.Before(time.Now()) { + // ensure only one goroutine at a time updates the assertion + w.mtx.RUnlock() + w.mtx.Lock() + defer w.mtx.Unlock() + // double check because another goroutine may have acquired the write lock first and done the update + if now := time.Now(); w.expires.Before(now) { + content, err := os.ReadFile(w.file) + if err != nil { + return "", err + } + w.assertion = string(content) + // Kubernetes rotates service account tokens when they reach 80% of their total TTL. The shortest TTL + // is 1 hour. That implies the token we just read is valid for at least 12 minutes (20% of 1 hour), + // but we add some margin for safety. + w.expires = now.Add(10 * time.Minute) + } + } else { + defer w.mtx.RUnlock() + } + return w.assertion, nil +} diff --git a/vendor/github.com/mitchellh/go-testing-interface/LICENSE b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/LICENSE.txt similarity index 86% rename from vendor/github.com/mitchellh/go-testing-interface/LICENSE rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/LICENSE.txt index a3866a29..48ea6616 100644 --- a/vendor/github.com/mitchellh/go-testing-interface/LICENSE +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/LICENSE.txt @@ -1,6 +1,6 @@ -The MIT License (MIT) +MIT License -Copyright (c) 2016 Mitchell Hashimoto +Copyright (c) Microsoft Corporation. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -9,13 +9,13 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go new file mode 100644 index 00000000..245af7d2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go @@ -0,0 +1,51 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package diag + +import ( + "fmt" + "runtime" + "strings" +) + +// Caller returns the file and line number of a frame on the caller's stack. +// If the function fails an empty string is returned. +// skipFrames - the number of frames to skip when determining the caller. +// Passing a value of 0 will return the immediate caller of this function. +func Caller(skipFrames int) string { + if pc, file, line, ok := runtime.Caller(skipFrames + 1); ok { + // the skipFrames + 1 is to skip ourselves + frame := runtime.FuncForPC(pc) + return fmt.Sprintf("%s()\n\t%s:%d", frame.Name(), file, line) + } + return "" +} + +// StackTrace returns a formatted stack trace string. +// If the function fails an empty string is returned. +// skipFrames - the number of stack frames to skip before composing the trace string. +// totalFrames - the maximum number of stack frames to include in the trace string. +func StackTrace(skipFrames, totalFrames int) string { + pcCallers := make([]uintptr, totalFrames) + if frames := runtime.Callers(skipFrames, pcCallers); frames == 0 { + return "" + } + frames := runtime.CallersFrames(pcCallers) + sb := strings.Builder{} + for { + frame, more := frames.Next() + sb.WriteString(frame.Function) + sb.WriteString("()\n\t") + sb.WriteString(frame.File) + sb.WriteRune(':') + sb.WriteString(fmt.Sprintf("%d\n", frame.Line)) + if !more { + break + } + } + return sb.String() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go new file mode 100644 index 00000000..66bf13e5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go @@ -0,0 +1,7 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package diag diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go new file mode 100644 index 00000000..8c6eacb6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go @@ -0,0 +1,7 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License.
+ +package errorinfo diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go new file mode 100644 index 00000000..ade7b348 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go @@ -0,0 +1,16 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package errorinfo + +// NonRetriable represents a non-transient error. This works in +// conjunction with the retry policy, indicating that the error condition +// is idempotent, so no retries will be attempted. +// Use errors.As() to access this interface in the error chain. +type NonRetriable interface { + error + NonRetriable() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go new file mode 100644 index 00000000..d4ed6ccc --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go @@ -0,0 +1,124 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "errors" + "io" + "net/http" +) + +// HasStatusCode returns true if the Response's status code is one of the specified values. +// Exported as runtime.HasStatusCode(). +func HasStatusCode(resp *http.Response, statusCodes ...int) bool { + if resp == nil { + return false + } + for _, sc := range statusCodes { + if resp.StatusCode == sc { + return true + } + } + return false +} + +// PayloadOptions contains the optional values for the Payload func. +// NOT exported but used by azcore. +type PayloadOptions struct { + // BytesModifier receives the downloaded byte slice and returns an updated byte slice. + // Use this to modify the downloaded bytes in a payload (e.g. removing a BOM). + BytesModifier func([]byte) []byte +} + +// Payload reads and returns the response body or an error. +// On a successful read, the response body is cached. +// Subsequent reads will access the cached value. +// Exported as runtime.Payload() WITHOUT the opts parameter. +func Payload(resp *http.Response, opts *PayloadOptions) ([]byte, error) { + modifyBytes := func(b []byte) []byte { return b } + if opts != nil && opts.BytesModifier != nil { + modifyBytes = opts.BytesModifier + } + + // r.Body won't be a nopClosingBytesReader if downloading was skipped + if buf, ok := resp.Body.(*nopClosingBytesReader); ok { + bytesBody := modifyBytes(buf.Bytes()) + buf.Set(bytesBody) + return bytesBody, nil + } + + bytesBody, err := io.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return nil, err + } + + bytesBody = modifyBytes(bytesBody) + resp.Body = &nopClosingBytesReader{s: bytesBody} + return bytesBody, nil +} + +// PayloadDownloaded returns true if the response body has already been downloaded. +// This implies that the Payload() func above has been previously called. +// NOT exported but used by azcore. +func PayloadDownloaded(resp *http.Response) bool { + _, ok := resp.Body.(*nopClosingBytesReader) + return ok +} + +// nopClosingBytesReader is an io.ReadSeekCloser around a byte slice. +// It also provides direct access to the byte slice to avoid rereading. +type nopClosingBytesReader struct { + s []byte + i int64 +} + +// Bytes returns the underlying byte slice. 
+func (r *nopClosingBytesReader) Bytes() []byte { + return r.s +} + +// Close implements the io.Closer interface. +func (*nopClosingBytesReader) Close() error { + return nil +} + +// Read implements the io.Reader interface. +func (r *nopClosingBytesReader) Read(b []byte) (n int, err error) { + if r.i >= int64(len(r.s)) { + return 0, io.EOF + } + n = copy(b, r.s[r.i:]) + r.i += int64(n) + return +} + +// Set replaces the existing byte slice with the specified byte slice and resets the reader. +func (r *nopClosingBytesReader) Set(b []byte) { + r.s = b + r.i = 0 +} + +// Seek implements the io.Seeker interface. +func (r *nopClosingBytesReader) Seek(offset int64, whence int) (int64, error) { + var i int64 + switch whence { + case io.SeekStart: + i = offset + case io.SeekCurrent: + i = r.i + offset + case io.SeekEnd: + i = int64(len(r.s)) + offset + default: + return 0, errors.New("nopClosingBytesReader: invalid whence") + } + if i < 0 { + return 0, errors.New("nopClosingBytesReader: negative position") + } + r.i = i + return i, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go new file mode 100644 index 00000000..d7876d29 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go @@ -0,0 +1,7 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package log diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go new file mode 100644 index 00000000..4f1dcf1b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go @@ -0,0 +1,104 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package log + +import ( + "fmt" + "os" + "time" +) + +/////////////////////////////////////////////////////////////////////////////////////////////////// +// NOTE: The following are exported as public surface area from azcore. DO NOT MODIFY +/////////////////////////////////////////////////////////////////////////////////////////////////// + +// Event is used to group entries. Each group can be toggled on or off. +type Event string + +// SetEvents is used to control which events are written to +// the log. By default all log events are writen. +func SetEvents(cls ...Event) { + log.cls = cls +} + +// SetListener will set the Logger to write to the specified listener. +func SetListener(lst func(Event, string)) { + log.lst = lst +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +// END PUBLIC SURFACE AREA +/////////////////////////////////////////////////////////////////////////////////////////////////// + +// Should returns true if the specified log event should be written to the log. +// By default all log events will be logged. Call SetEvents() to limit +// the log events for logging. +// If no listener has been set this will return false. +// Calling this method is useful when the message to log is computationally expensive +// and you want to avoid the overhead if its log event is not enabled. 
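+//
+// For illustration, a hypothetical caller could guard an expensive message this way (EventRequest and
+// buildVerboseMessage are placeholder names, not part of this package):
+//
+//	if Should(EventRequest) {
+//		Write(EventRequest, buildVerboseMessage())
+//	}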
+func Should(cls Event) bool { + if log.lst == nil { + return false + } + if log.cls == nil || len(log.cls) == 0 { + return true + } + for _, c := range log.cls { + if c == cls { + return true + } + } + return false +} + +// Write invokes the underlying listener with the specified event and message. +// If the event shouldn't be logged or there is no listener then Write does nothing. +func Write(cls Event, message string) { + if !Should(cls) { + return + } + log.lst(cls, message) +} + +// Writef invokes the underlying listener with the specified event and formatted message. +// If the event shouldn't be logged or there is no listener then Writef does nothing. +func Writef(cls Event, format string, a ...interface{}) { + if !Should(cls) { + return + } + log.lst(cls, fmt.Sprintf(format, a...)) +} + +// TestResetEvents is used for TESTING PURPOSES ONLY. +func TestResetEvents() { + log.cls = nil +} + +// logger controls which events to log and writing to the underlying log. +type logger struct { + cls []Event + lst func(Event, string) +} + +// the process-wide logger +var log logger + +func init() { + initLogging() +} + +// split out for testing purposes +func initLogging() { + if cls := os.Getenv("AZURE_SDK_GO_LOGGING"); cls == "all" { + // cls could be enhanced to support a comma-delimited list of log events + log.lst = func(cls Event, msg string) { + // simple console logger, it writes to stderr in the following format: + // [time-stamp] Event: message + fmt.Fprintf(os.Stderr, "[%s] %s: %s\n", time.Now().Format(time.StampMicro), cls, msg) + } + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/poller/util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/poller/util.go new file mode 100644 index 00000000..db826962 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/poller/util.go @@ -0,0 +1,155 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package poller + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" +) + +// the well-known set of LRO status/provisioning state values. +const ( + StatusSucceeded = "Succeeded" + StatusCanceled = "Canceled" + StatusFailed = "Failed" + StatusInProgress = "InProgress" +) + +// these are non-conformant states that we've seen in the wild. +// we support them for back-compat. +const ( + StatusCancelled = "Cancelled" + StatusCompleted = "Completed" +) + +// IsTerminalState returns true if the LRO's state is terminal. +func IsTerminalState(s string) bool { + return Failed(s) || Succeeded(s) +} + +// Failed returns true if the LRO's state is terminal failure. +func Failed(s string) bool { + return strings.EqualFold(s, StatusFailed) || strings.EqualFold(s, StatusCanceled) || strings.EqualFold(s, StatusCancelled) +} + +// Succeeded returns true if the LRO's state is terminal success. +func Succeeded(s string) bool { + return strings.EqualFold(s, StatusSucceeded) || strings.EqualFold(s, StatusCompleted) +} + +// returns true if the LRO response contains a valid HTTP status code +func StatusCodeValid(resp *http.Response) bool { + return exported.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusCreated, http.StatusNoContent) +} + +// IsValidURL verifies that the URL is valid and absolute. 
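+//
+// For illustration, an absolute URL such as "https://management.azure.com/operations/1" is considered
+// valid, while a relative path is not:
+//
+//	IsValidURL("https://management.azure.com/operations/1") // true
+//	IsValidURL("/operations/1")                             // false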
+func IsValidURL(s string) bool { + u, err := url.Parse(s) + return err == nil && u.IsAbs() +} + +// ErrNoBody is returned if the response didn't contain a body. +var ErrNoBody = errors.New("the response did not contain a body") + +// GetJSON reads the response body into a raw JSON object. +// It returns ErrNoBody if there was no content. +func GetJSON(resp *http.Response) (map[string]any, error) { + body, err := exported.Payload(resp, nil) + if err != nil { + return nil, err + } + if len(body) == 0 { + return nil, ErrNoBody + } + // unmarshall the body to get the value + var jsonBody map[string]any + if err = json.Unmarshal(body, &jsonBody); err != nil { + return nil, err + } + return jsonBody, nil +} + +// provisioningState returns the provisioning state from the response or the empty string. +func provisioningState(jsonBody map[string]any) string { + jsonProps, ok := jsonBody["properties"] + if !ok { + return "" + } + props, ok := jsonProps.(map[string]any) + if !ok { + return "" + } + rawPs, ok := props["provisioningState"] + if !ok { + return "" + } + ps, ok := rawPs.(string) + if !ok { + return "" + } + return ps +} + +// status returns the status from the response or the empty string. +func status(jsonBody map[string]any) string { + rawStatus, ok := jsonBody["status"] + if !ok { + return "" + } + status, ok := rawStatus.(string) + if !ok { + return "" + } + return status +} + +// GetStatus returns the LRO's status from the response body. +// Typically used for Azure-AsyncOperation flows. +// If there is no status in the response body the empty string is returned. +func GetStatus(resp *http.Response) (string, error) { + jsonBody, err := GetJSON(resp) + if err != nil { + return "", err + } + return status(jsonBody), nil +} + +// GetProvisioningState returns the LRO's state from the response body. +// If there is no state in the response body the empty string is returned. +func GetProvisioningState(resp *http.Response) (string, error) { + jsonBody, err := GetJSON(resp) + if err != nil { + return "", err + } + return provisioningState(jsonBody), nil +} + +// GetResourceLocation returns the LRO's resourceLocation value from the response body. +// Typically used for Operation-Location flows. +// If there is no resourceLocation in the response body the empty string is returned. +func GetResourceLocation(resp *http.Response) (string, error) { + jsonBody, err := GetJSON(resp) + if err != nil { + return "", err + } + v, ok := jsonBody["resourceLocation"] + if !ok { + // it might be ok if the field doesn't exist, the caller must make that determination + return "", nil + } + vv, ok := v.(string) + if !ok { + return "", fmt.Errorf("the resourceLocation value %v was not in string format", v) + } + return vv, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go new file mode 100644 index 00000000..238ef42e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go @@ -0,0 +1,123 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package temporal + +import ( + "sync" + "time" +) + +// AcquireResource abstracts a method for refreshing a temporal resource. 
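+//
+// For illustration, a hypothetical acquire function refreshing a string credential could look like this
+// (fetchCredential is a placeholder for whatever actually renews the resource):
+//
+//	acquire := func(state string) (string, time.Time, error) {
+//		cred, expires, err := fetchCredential(state)
+//		return cred, expires, err
+//	}
+//	res := NewResource[string, string](acquire)
+//	_ = res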
+type AcquireResource[TResource, TState any] func(state TState) (newResource TResource, newExpiration time.Time, err error) + +// Resource is a temporal resource (usually a credential) that requires periodic refreshing. +type Resource[TResource, TState any] struct { + // cond is used to synchronize access to the shared resource embodied by the remaining fields + cond *sync.Cond + + // acquiring indicates that some thread/goroutine is in the process of acquiring/updating the resource + acquiring bool + + // resource contains the value of the shared resource + resource TResource + + // expiration indicates when the shared resource expires; it is 0 if the resource was never acquired + expiration time.Time + + // lastAttempt indicates when a thread/goroutine last attempted to acquire/update the resource + lastAttempt time.Time + + // acquireResource is the callback function that actually acquires the resource + acquireResource AcquireResource[TResource, TState] +} + +// NewResource creates a new Resource that uses the specified AcquireResource for refreshing. +func NewResource[TResource, TState any](ar AcquireResource[TResource, TState]) *Resource[TResource, TState] { + return &Resource[TResource, TState]{cond: sync.NewCond(&sync.Mutex{}), acquireResource: ar} +} + +// Get returns the underlying resource. +// If the resource is fresh, no refresh is performed. +func (er *Resource[TResource, TState]) Get(state TState) (TResource, error) { + // If the resource is expiring within this time window, update it eagerly. + // This allows other threads/goroutines to keep running by using the not-yet-expired + // resource value while one thread/goroutine updates the resource. + const window = 5 * time.Minute // This example updates the resource 5 minutes prior to expiration + const backoff = 30 * time.Second // Minimum wait time between eager update attempts + + now, acquire, expired := time.Now(), false, false + + // acquire exclusive lock + er.cond.L.Lock() + resource := er.resource + + for { + expired = er.expiration.IsZero() || er.expiration.Before(now) + if expired { + // The resource was never acquired or has expired + if !er.acquiring { + // If another thread/goroutine is not acquiring/updating the resource, this thread/goroutine will do it + er.acquiring, acquire = true, true + break + } + // Getting here means that this thread/goroutine will wait for the updated resource + } else if er.expiration.Add(-window).Before(now) { + // The resource is valid but is expiring within the time window + if !er.acquiring && er.lastAttempt.Add(backoff).Before(now) { + // If another thread/goroutine is not acquiring/renewing the resource, and none has attempted + // to do so within the last 30 seconds, this thread/goroutine will do it + er.acquiring, acquire = true, true + break + } + // This thread/goroutine will use the existing resource value while another updates it + resource = er.resource + break + } else { + // The resource is not close to expiring, this thread/goroutine should use its current value + resource = er.resource + break + } + // If we get here, wait for the new resource value to be acquired/updated + er.cond.Wait() + } + er.cond.L.Unlock() // Release the lock so no threads/goroutines are blocked + + var err error + if acquire { + // This thread/goroutine has been selected to acquire/update the resource + var expiration time.Time + var newValue TResource + er.lastAttempt = now + newValue, expiration, err = er.acquireResource(state) + + // Atomically, update the shared resource's new value & 
expiration. + er.cond.L.Lock() + if err == nil { + // Update resource & expiration, return the new value + resource = newValue + er.resource, er.expiration = resource, expiration + } else if !expired { + // An eager update failed. Discard the error and return the current--still valid--resource value + err = nil + } + er.acquiring = false // Indicate that no thread/goroutine is currently acquiring the resource + + // Wake up any waiting threads/goroutines since there is a resource they can ALL use + er.cond.L.Unlock() + er.cond.Broadcast() + } + return resource, err // Return the resource this thread/goroutine can use +} + +// Expire marks the resource as expired, ensuring it's refreshed on the next call to Get(). +func (er *Resource[TResource, TState]) Expire() { + er.cond.L.Lock() + defer er.cond.L.Unlock() + + // Reset the expiration as if we never got this resource to begin with + er.expiration = time.Time{} +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go new file mode 100644 index 00000000..a3824bee --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go @@ -0,0 +1,7 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package uuid diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go new file mode 100644 index 00000000..278ac9cd --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go @@ -0,0 +1,76 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package uuid + +import ( + "crypto/rand" + "errors" + "fmt" + "strconv" +) + +// The UUID reserved variants. +const ( + reservedRFC4122 byte = 0x40 +) + +// A UUID representation compliant with specification in RFC4122 document. +type UUID [16]byte + +// New returns a new UUID using the RFC4122 algorithm. +func New() (UUID, error) { + u := UUID{} + // Set all bits to pseudo-random values. + // NOTE: this takes a process-wide lock + _, err := rand.Read(u[:]) + if err != nil { + return u, err + } + u[8] = (u[8] | reservedRFC4122) & 0x7F // u.setVariant(ReservedRFC4122) + + var version byte = 4 + u[6] = (u[6] & 0xF) | (version << 4) // u.setVersion(4) + return u, nil +} + +// String returns the UUID in "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" format. +func (u UUID) String() string { + return fmt.Sprintf("%x-%x-%x-%x-%x", u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) +} + +// Parse parses a string formatted as "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +// or "{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}" into a UUID. 
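+//
+// For illustration, with a well-formed input:
+//
+//	u, err := Parse("72a2d9c3-7e3b-4a6c-9d2f-0b1c2d3e4f5a")
+//	if err != nil {
+//		// the string was not a valid UUID
+//	}
+//	_ = u.String()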
+func Parse(s string) (UUID, error) { + var uuid UUID + // ensure format + switch len(s) { + case 36: + // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 38: + // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + s = s[1:37] + default: + return uuid, errors.New("invalid UUID format") + } + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + // parse chunks + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + b, err := strconv.ParseUint(s[x:x+2], 16, 8) + if err != nil { + return uuid, fmt.Errorf("invalid UUID format: %s", err) + } + uuid[i] = byte(b) + } + return uuid, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/CHANGELOG.md new file mode 100644 index 00000000..f93ca445 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/CHANGELOG.md @@ -0,0 +1,136 @@ +# Release History + +## 0.12.0 (2023-06-08) + +### Breaking Changes + +* Renamed `GetRandomBytesRequest` to `GetRandomBytesParameters` +* `ListDeletedKey` to `ListDeletedKeyProperties` +* `ListKeys` to `ListKeyProperties` +* `DeletedKeyBundle` to `DeletedKey` +* `KeyBundle` to `KeyVaultKey` +* `RestoreKeyParameters.KeyBundleBackup` to `RestoreKeyParameters.KeyBackup` +* `DeletedKeyItem` to `DeletedKeyProperties` +* `KeyItem` to `KeyProperties` +* `DeletedKeyListResult` to `DeletedKeyPropertiesListResult` +* `KeyListResult` `KeyPropertiesListResult` +* `KeyOperationsParameters` to `KeyOperationParameters` +* Changed `JSONWebKey.KeyOperations` from type []*string to []*KeyOperation +* `ReleaseParameters.Enc` to `ReleaseParameters.Algorithm` +* `KeyOperationParameters.AAD` to `KeyOperationParameters.AdditionalAuthenticatedData` +* `KeyOperationParameters.Tag` to `KeyOperationParameters.AuthenticationTag` +* `JSONWebKeyOperation` to `KeyOperation` +* `JSONWebKeyCurveName` to `KeyCurveName` +* `JSONWebKeyEncryptionAlgorithm` to `EncryptionAlgorithm` +* `JSONWebKeySignatureAlgorithm` to `SignatureAlgorithm` +* `JSONWebKeyType` to `KeyType` +* `LifetimeActions` to `LifetimeAction` +* Removed `DeletionRecoveryLevel` type +* Removed `SignatureAlgorithmRSNULL` constant +* Removed `KeyOperationExport` constant +* Removed `MaxResults` option + +### Other Changes +* Updated dependencies + +## 0.11.0 (2023-04-13) + +### Breaking Changes +* Moved from `sdk/keyvault/azkeys` to `sdk/security/keyvault/azkeys` + +## 0.10.0 (2023-04-13) + +### Features Added +* Upgraded to api version 7.4 + +### Breaking Changes +* Renamed `ActionType` to `KeyRotationPolicyAction` + +## 0.9.0 (2022-11-08) + +### Breaking Changes +* `NewClient` returns an `error` + +## 0.8.1 (2022-09-20) + +### Features Added +* Added `ClientOptions.DisableChallengeResourceVerification`. + See https://aka.ms/azsdk/blog/vault-uri for more information. + +## 0.8.0 (2022-09-12) + +### Breaking Changes +* Verify the challenge resource matches the vault domain. + +## 0.7.0 (2022-08-09) + +### Breaking Changes +* Changed type of `NewClient` options parameter to `azkeys.ClientOptions`, which embeds + the former type, `azcore.ClientOptions` + +## 0.6.0 (2022-07-07) + +### Breaking Changes +* The `Client` API now corresponds more directly to the Key Vault REST API. + Most method signatures and types have changed. See the + [module documentation](https://aka.ms/azsdk/go/keyvault-keys/docs) + for updated code examples and more details. 
+ +### Other Changes +* Upgrade to latest `azcore` + +## 0.5.1 (2022-05-12) + +### Other Changes +* Update to latest `azcore` and `internal` modules. + +## 0.5.0 (2022-04-06) + +### Features Added +* Added the Name property on `Key` + +### Breaking Changes +* Requires go 1.18 +* `ListPropertiesOfDeletedKeysPager` has `More() bool` and `NextPage(context.Context) (ListPropertiesOfDeletedKeysPage, error)` for paging over deleted keys. +* `ListPropertiesOfKeyVersionsPager` has `More() bool` and `NextPage(context.Context) (ListPropertiesOfKeyVersionsPage, error)` for paging over deleted keys. +* Removing `RawResponse *http.Response` from `crypto` response types + +## 0.4.0 (2022-03-08) + +### Features Added +* Adds the `ReleasePolicy` parameter to the `UpdateKeyPropertiesOptions` struct. +* Adds the `Immutable` boolean to the `KeyReleasePolicy` model. +* Added a `ToPtr` method on `KeyType` constant + +### Breaking Changes +* Requires go 1.18 +* Changed the `Data` to `EncodedPolicy` on the `KeyReleasePolicy` struct. +* Changed the `Updated`, `Created`, and `Expires` properties to `UpdatedOn`, `CreatedOn`, and `ExpiresOn`. +* Renamed `JSONWebKeyOperation` to `Operation`. +* Renamed `JSONWebKeyCurveName` to `CurveName` +* Prefixed all KeyType constants with `KeyType` +* Changed `KeyBundle` to `KeyVaultKey` and `DeletedKeyBundle` to `DeletedKey` +* Renamed `KeyAttributes` to `KeyProperties` +* Renamed `ListKeyVersions` to `ListPropertiesOfKeyVersions` +* Removed `Attributes` struct +* Changed `CreateOCTKey`/`Response`/`Options` to `CreateOctKey`/`Response`/`Options` +* Removed all `RawResponse *http.Response` fields from response structs. + +## 0.3.0 (2022-02-08) + +### Breaking Changes +* Changed the `Tags` properties from `map[string]*string` to `map[string]string` + +### Bugs Fixed +* Fixed a bug in `UpdateKeyProperties` where the `KeyOps` would be deleted if the `UpdateKeyProperties.KeyOps` value was left empty. + +## 0.2.0 (2022-01-12) + +### Bugs Fixed +* Fixes a bug in `crypto.NewClient` where the key version was required in the path, it is no longer required but is recommended. + +### Other Changes +* Updates `azcore` dependency from `v0.20.0` to `v0.21.0` + +## 0.1.0 (2021-11-09) +* This is the initial release of the `azkeys` library diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/LICENSE.txt new file mode 100644 index 00000000..d1ca00f2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/LICENSE.txt @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/README.md new file mode 100644 index 00000000..709fe300 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/README.md @@ -0,0 +1,147 @@ +# Azure Key Vault Keys client module for Go + +* Cryptographic key management (this module) - create, store, and control access to the keys used to encrypt your data +* Managed HSM administration ([azadmin](https://aka.ms/azsdk/go/keyvault-admin/docs)) - role-based access control (RBAC), settings, and vault-level backup and restore options +* Certificate management ([azcertificates](https://aka.ms/azsdk/go/keyvault-certificates/docs)) - create, manage, and deploy public and private SSL/TLS certificates +* Secrets management ([azsecrets](https://aka.ms/azsdk/go/keyvault-secrets/docs)) - securely store and control access to tokens, passwords, certificates, API keys, and other secrets + +[Source code][key_client_src] | [Package (pkg.go.dev)][goget_azkeys] | [Product documentation][keyvault_docs] | [Samples][keys_samples] + +## Getting started + +### Install packages + +Install `azkeys` and `azidentity` with `go get`: +```Bash +go get github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys +go get github.com/Azure/azure-sdk-for-go/sdk/azidentity +``` +[azidentity][azure_identity] is used for Azure Active Directory authentication as demonstrated below. + +### Prerequisites + +* An [Azure subscription][azure_sub] +* A supported Go version (the Azure SDK supports the two most recent Go releases) +* A key vault. If you need to create one, see the Key Vault documentation for instructions on doing so in the [Azure Portal][azure_keyvault_portal] or with the [Azure CLI][azure_keyvault_cli]. + +### Authentication + +This document demonstrates using [azidentity.NewDefaultAzureCredential][default_cred_ref] to authenticate. This credential type works in both local development and production environments. We recommend using a [managed identity][managed_identity] in production. + +[Client][client_docs] accepts any [azidentity][azure_identity] credential. See the [azidentity][azure_identity] documentation for more information about other credential types. + +#### Create a client + +Constructing the client requires your vault's URL, which you can get from the Azure CLI or the Azure Portal. + +```go +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys" +) + +func main() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + // TODO: handle error + } + + client, err := azkeys.NewClient("https://<vault-name>.vault.azure.net", cred, nil) + if err != nil { + // TODO: handle error + } +} +``` + +## Key concepts + +### Keys + +Azure Key Vault can create and store RSA and elliptic curve keys. Both can optionally be protected by hardware security modules (HSMs). Azure Key Vault can also perform cryptographic operations with them. For more information about keys and supported operations and algorithms, see the [Key Vault documentation](https://docs.microsoft.com/azure/key-vault/keys/about-keys).
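+
+For example, a sketch of creating a 2048-bit RSA key could look like the following (the `CreateKeyParameters` fields and the `KeyTypeRSA` constant shown here are assumptions based on this module's naming conventions, so check the package documentation for the exact shapes; `to` refers to the `azcore/to` helper package):
+
+```go
+params := azkeys.CreateKeyParameters{
+    Kty:     to.Ptr(azkeys.KeyTypeRSA),
+    KeySize: to.Ptr(int32(2048)),
+}
+resp, err := client.CreateKey(context.TODO(), "exampleKey", params, nil)
+if err != nil {
+    // TODO: handle error
+}
+_ = resp
+```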
+ +[Client][client_docs] can create keys in the vault, get existing keys from the vault, update key metadata, and delete keys, as shown in the examples below. + +## Examples + +Get started with our [examples][keys_samples]. + +## Troubleshooting + +### Error Handling + +All methods which send HTTP requests return `*azcore.ResponseError` when these requests fail. `ResponseError` has error details and the raw response from Key Vault. + +```go +import "github.com/Azure/azure-sdk-for-go/sdk/azcore" + +resp, err := client.GetKey(context.Background(), "keyName", nil) +if err != nil { + var httpErr *azcore.ResponseError + if errors.As(err, &httpErr) { + // TODO: investigate httpErr + } else { + // TODO: not an HTTP error + } +} +``` + +### Logging + +This module uses the logging implementation in `azcore`. To turn on logging for all Azure SDK modules, set `AZURE_SDK_GO_LOGGING` to `all`. By default the logger writes to stderr. Use the `azcore/log` package to control log output. For example, logging only HTTP request and response events, and printing them to stdout: + +```go +import azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" + +// Print log events to stdout +azlog.SetListener(func(cls azlog.Event, msg string) { + fmt.Println(msg) +}) + +// Includes only requests and responses in logs +azlog.SetEvents(azlog.EventRequest, azlog.EventResponse) +``` + +### Accessing `http.Response` + +You can access the raw `*http.Response` returned by Key Vault using the `runtime.WithCaptureResponse` method and a context passed to any client method. + +```go +import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + +var response *http.Response +ctx := runtime.WithCaptureResponse(context.TODO(), &response) +_, err = client.GetKey(ctx, "keyName", nil) +if err != nil { + // TODO: handle error +} +// TODO: do something with response +``` + +### Additional Documentation + +For more extensive documentation on Azure Key Vault, see the [API reference documentation][reference_docs]. + +## Contributing + +This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com. + +When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA. + +This project has adopted the [Microsoft Open Source Code of Conduct][code_of_conduct]. For more information, see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact opencode@microsoft.com with any additional questions or comments. 
+ + +[azure_identity]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity +[azure_keyvault_cli]: https://docs.microsoft.com/azure/key-vault/general/quick-create-cli +[azure_keyvault_portal]: https://docs.microsoft.com/azure/key-vault/general/quick-create-portal +[azure_sub]: https://azure.microsoft.com/free/ +[default_cred_ref]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#NewDefaultAzureCredential +[code_of_conduct]: https://opensource.microsoft.com/codeofconduct/ +[keyvault_docs]: https://docs.microsoft.com/azure/key-vault/ +[goget_azkeys]: https://aka.ms/azsdk/go/keyvault-keys/docs +[reference_docs]: https://aka.ms/azsdk/go/keyvault-keys/docs +[client_docs]: https://aka.ms/azsdk/go/keyvault-keys/docs#Client +[key_client_src]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/security/keyvault/azkeys/client.go +[keys_samples]: https://aka.ms/azsdk/go/keyvault-keys/docs#pkg-examples +[managed_identity]: https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview + +![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-go%2Fsdk%2Fsecurity%2Fkeyvault%2Fazkeys%2FREADME.png) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/TROUBLESHOOTING.md new file mode 100644 index 00000000..857aa4c0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/TROUBLESHOOTING.md @@ -0,0 +1,4 @@ +# Troubleshoot Azure Key Vault Keys Client Module Issues + +See our [Azure Key Vault SDK Troubleshooting Guide](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/security/keyvault/TROUBLESHOOTING.md) +to troubleshoot issues common to Azure Key Vault client modules. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/assets.json new file mode 100644 index 00000000..1da72428 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/assets.json @@ -0,0 +1,6 @@ +{ + "AssetsRepo": "Azure/azure-sdk-assets", + "AssetsRepoPrefixPath": "go", + "TagPrefix": "go/security/keyvault/azkeys", + "Tag": "go/security/keyvault/azkeys_afbe036428" +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/autorest.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/autorest.md new file mode 100644 index 00000000..12bbe95e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/autorest.md @@ -0,0 +1,229 @@ +## Go + +```yaml +clear-output-folder: false +export-clients: true +go: true +input-file: https://github.com/Azure/azure-rest-api-specs/blob/551275acb80e1f8b39036b79dfc35a8f63b601a7/specification/keyvault/data-plane/Microsoft.KeyVault/stable/7.4/keys.json +license-header: MICROSOFT_MIT_NO_VERSION +module: github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys +openapi-type: "data-plane" +output-folder: ../azkeys +override-client-name: Client +security: "AADToken" +security-scopes: "https://vault.azure.net/.default" +use: "@autorest/go@4.0.0-preview.46" +version: "^3.0.0" + +directive: + # delete unused models + - remove-model: KeyExportParameters + - remove-model: KeyProperties + + # make vault URL a parameter of the client constructor + - from: swagger-document + where: $["x-ms-parameterized-host"] + transform: $.parameters[0]["x-ms-parameter-location"] = "client" + + # rename parameter models to match their methods + - rename-model: + from: KeyCreateParameters + to: CreateKeyParameters + - rename-model: + from: KeyExportParameters + to: ExportKeyParameters + - rename-model: + from: KeyImportParameters + to: ImportKeyParameters + - rename-model: + from: KeyReleaseParameters + to: ReleaseParameters + - rename-model: + from: KeyRestoreParameters + to: RestoreKeyParameters + - rename-model: + from: KeySignParameters + to: SignParameters + - rename-model: + from: KeyUpdateParameters + to: UpdateKeyParameters + - rename-model: + from: KeyVerifyParameters + to: VerifyParameters + - rename-model: + from: GetRandomBytesRequest + to: GetRandomBytesParameters + + # rename paged operations from Get* to List* + - rename-operation: + from: GetDeletedKeys + to: ListDeletedKeyProperties + - rename-operation: + from: GetKeys + to: ListKeyProperties + - rename-operation: + from: GetKeyVersions + to: ListKeyPropertiesVersions + + # rename KeyItem and KeyBundle + - rename-model: + from: DeletedKeyBundle + to: DeletedKey + - rename-model: + from: KeyItem + to: KeyProperties + - rename-model: + from: DeletedKeyItem + to: DeletedKeyProperties + - rename-model: + from: DeletedKeyListResult + to: DeletedKeyPropertiesListResult + - rename-model: + from: KeyListResult + to: KeyPropertiesListResult + - from: swagger-document + where: $.definitions.RestoreKeyParameters.properties.value + transform: $["x-ms-client-name"] = "KeyBackup" + + # Change LifetimeActions to LifetimeAction + - rename-model: + from: LifetimeActions + to: LifetimeAction + - rename-model: + from: LifetimeActionsType + to: LifetimeActionType + - rename-model: + from: LifetimeActionsTrigger + to: LifetimeActionTrigger + + # Remove MaxResults parameter + - where: "$.paths..*" + 
remove-parameter: + in: query + name: maxresults + + # KeyOps updates + - rename-model: + from: KeyOperationsParameters + to: KeyOperationParameters + - from: models.go + where: $ + transform: return $.replace(/KeyOps \[\]\*string/, "KeyOps []*KeyOperation"); + + # fix capitalization + - from: swagger-document + where: $.definitions.ImportKeyParameters.properties.Hsm + transform: $["x-ms-client-name"] = "HSM" + - from: swagger-document + where: $.definitions..properties..iv + transform: $["x-ms-client-name"] = "IV" + - from: swagger-document + where: $.definitions..properties..kid + transform: $["x-ms-client-name"] = "KID" + + # keyName, keyVersion -> name, version + - from: swagger-document + where: $.paths..parameters..[?(@.name=='key-name')] + transform: $["x-ms-client-name"] = "name" + - from: swagger-document + where: $.paths..parameters..[?(@.name=='key-version')] + transform: $["x-ms-client-name"] = "version" + + # KeyEncryptionAlgorithm renames + - from: swagger-document + where: $.definitions.ReleaseParameters.properties.enc + transform: $["x-ms-client-name"] = "algorithm" + + # rename KeyOperationsParameters fields + - from: swagger-document + where: $.definitions.KeyOperationParameters.properties.aad + transform: $["x-ms-client-name"] = "AdditionalAuthenticatedData" + - from: swagger-document + where: $.definitions.KeyOperationParameters.properties.tag + transform: $["x-ms-client-name"] = "AuthenticationTag" + + # remove JSONWeb Prefix + - from: + - models.go + - constants.go + where: $ + transform: return $.replace(/JSONWebKeyOperation/g, "KeyOperation"); + - from: + - models.go + - constants.go + where: $ + transform: return $.replace(/JSONWebKeyCurveName/g, "CurveName"); + - from: + - models.go + - constants.go + where: $ + transform: return $.replace(/JSONWebKeyEncryptionAlgorithm/g, "EncryptionAlgorithm"); + - from: + - models.go + - constants.go + where: $ + transform: return $.replace(/JSONWebKeySignatureAlgorithm/g, "SignatureAlgorithm"); + - from: + - models.go + - constants.go + where: $ + transform: return $.replace(/JSONWebKeyType/g, "KeyType"); + + # remove DeletionRecoveryLevel type + - from: models.go + where: $ + transform: return $.replace(/RecoveryLevel \*DeletionRecoveryLevel/g, "RecoveryLevel *string"); + - from: constants.go + where: $ + transform: return $.replace(/(?:\/\/.*\s)+type DeletionRecoveryLevel string/, ""); + - from: constants.go + where: $ + transform: return $.replace(/(?:\/\/.*\s)+func PossibleDeletionRecovery(?:.+\s)+\}/, ""); + - from: constants.go + where: $ + transform: return $.replace(/const \(\n\s\/\/ DeletionRecoveryLevel(?:.+\s)+\)/, ""); + + # delete SignatureAlgorithmRSNULL + - from: constants.go + where: $ + transform: return $.replace(/.*(\bSignatureAlgorithmRSNULL\b).*/g, ""); + + # delete KeyOperationExport + - from: constants.go + where: $ + transform: return $.replace(/.*(\bKeyOperationExport\b).*/g, ""); + + # delete unused error models + - from: models.go + where: $ + transform: return $.replace(/(?:\/\/.*\s)+type (?:Error|KeyVaultError).+\{(?:\s.+\s)+\}\s/g, ""); + - from: models_serde.go + where: $ + transform: return $.replace(/(?:\/\/.*\s)+func \(\w \*?(?:Error|KeyVaultError)\).*\{\s(?:.+\s)+\}\s/g, ""); + + # delete the Attributes model defined in common.json (it's used only with allOf) + - from: models.go + where: $ + transform: return $.replace(/(?:\/\/.*\s)+type Attributes.+\{(?:\s.+\s)+\}\s/, ""); + - from: models_serde.go + where: $ + transform: return $.replace(/(?:\/\/.*\s)+func \(a 
\*?Attributes\).*\{\s(?:.+\s)+\}\s/g, ""); + + # delete the version path param check (version == "" is legal for Key Vault but indescribable by OpenAPI) + - from: client.go + where: $ + transform: return $.replace(/\sif version == "" \{\s+.+version cannot be empty"\)\s+\}\s/g, ""); + + # delete client name prefix from method options and response types + - from: + - client.go + - models.go + - response_types.go + where: $ + transform: return $.replace(/Client(\w+)((?:Options|Response))/g, "$1$2"); + + # insert a handwritten type for "KID" fields so we can add parsing methods + - from: models.go + where: $ + transform: return $.replace(/(KID \*)string(\s+.*)/g, "$1ID$2") +``` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/build.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/build.go new file mode 100644 index 00000000..da8103e4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/build.go @@ -0,0 +1,10 @@ +//go:build go1.18 +// +build go1.18 + +//go:generate autorest ./autorest.md +//go:generate gofmt -w . + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package azkeys diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/ci.security.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/ci.security.yml new file mode 100644 index 00000000..f5f7c917 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/ci.security.yml @@ -0,0 +1,34 @@ +# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file. +trigger: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/security/keyvault/azkeys + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/security/keyvault/azkeys + +stages: +- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + TimeoutInMinutes: 120 + ServiceDirectory: 'security/keyvault/azkeys' + RunLiveTests: true + AdditionalMatrixConfigs: + - Name: keyvault_test_matrix_addons + Path: sdk/security/keyvault/azkeys/platform-matrix.json + Selection: sparse + GenerateVMJobs: true diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/client.go new file mode 100644 index 00000000..02ec6c7f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/client.go @@ -0,0 +1,1294 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package azkeys + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// Client contains the methods for the Client group. +// Don't use this type directly, use a constructor function instead. 
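For orientation, a minimal sketch of obtaining such a client follows. It assumes the package's constructor (defined elsewhere in this package, not in this generated file) keeps the usual `azkeys.NewClient(vaultURL, credential, options) (*azkeys.Client, error)` shape and that `azidentity.NewDefaultAzureCredential` supplies the token credential; the vault URL and error handling are placeholders.

```go
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys"
)

func main() {
	// Pick up whatever credential the environment provides (CLI login, managed identity, ...).
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder vault URL; the constructor stores it as the client endpoint.
	client, err := azkeys.NewClient("https://<vault-name>.vault.azure.net", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	_ = client // the method sketches further down assume this value
}
```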
+type Client struct { + internal *azcore.Client + endpoint string +} + +// BackupKey - The Key Backup operation exports a key from Azure Key Vault in a protected form. Note that this operation does +// NOT return key material in a form that can be used outside the Azure Key Vault system, +// the returned key material is either protected to a Azure Key Vault HSM or to Azure Key Vault itself. The intent of this +// operation is to allow a client to GENERATE a key in one Azure Key Vault +// instance, BACKUP the key, and then RESTORE it into another Azure Key Vault instance. The BACKUP operation may be used to +// export, in protected form, any key type from Azure Key Vault. Individual +// versions of a key cannot be backed up. BACKUP / RESTORE can be performed within geographical boundaries only; meaning that +// a BACKUP from one geographical area cannot be restored to another +// geographical area. For example, a backup from the US geographical area cannot be restored in an EU geographical area. This +// operation requires the key/backup permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the key. +// - options - BackupKeyOptions contains the optional parameters for the Client.BackupKey method. +func (client *Client) BackupKey(ctx context.Context, name string, options *BackupKeyOptions) (BackupKeyResponse, error) { + req, err := client.backupKeyCreateRequest(ctx, name, options) + if err != nil { + return BackupKeyResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BackupKeyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return BackupKeyResponse{}, runtime.NewResponseError(resp) + } + return client.backupKeyHandleResponse(resp) +} + +// backupKeyCreateRequest creates the BackupKey request. +func (client *Client) backupKeyCreateRequest(ctx context.Context, name string, options *BackupKeyOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/backup" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// backupKeyHandleResponse handles the BackupKey response. +func (client *Client) backupKeyHandleResponse(resp *http.Response) (BackupKeyResponse, error) { + result := BackupKeyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.BackupKeyResult); err != nil { + return BackupKeyResponse{}, err + } + return result, nil +} + +// CreateKey - The create key operation can be used to create any key type in Azure Key Vault. If the named key already exists, +// Azure Key Vault creates a new version of the key. It requires the keys/create +// permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name for the new key. The system will generate the version name for the new key. The value you provide may be +// copied globally for the purpose of running the service. The value provided should not +// include personally identifiable or sensitive information. +// - parameters - The parameters to create a key. 
+// - options - CreateKeyOptions contains the optional parameters for the Client.CreateKey method. +func (client *Client) CreateKey(ctx context.Context, name string, parameters CreateKeyParameters, options *CreateKeyOptions) (CreateKeyResponse, error) { + req, err := client.createKeyCreateRequest(ctx, name, parameters, options) + if err != nil { + return CreateKeyResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return CreateKeyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return CreateKeyResponse{}, runtime.NewResponseError(resp) + } + return client.createKeyHandleResponse(resp) +} + +// createKeyCreateRequest creates the CreateKey request. +func (client *Client) createKeyCreateRequest(ctx context.Context, name string, parameters CreateKeyParameters, options *CreateKeyOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/create" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// createKeyHandleResponse handles the CreateKey response. +func (client *Client) createKeyHandleResponse(resp *http.Response) (CreateKeyResponse, error) { + result := CreateKeyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyBundle); err != nil { + return CreateKeyResponse{}, err + } + return result, nil +} + +// Decrypt - The DECRYPT operation decrypts a well-formed block of ciphertext using the target encryption key and specified +// algorithm. This operation is the reverse of the ENCRYPT operation; only a single block of +// data may be decrypted, the size of this block is dependent on the target key and the algorithm to be used. The DECRYPT +// operation applies to asymmetric and symmetric keys stored in Azure Key Vault +// since it uses the private portion of the key. This operation requires the keys/decrypt permission. Microsoft recommends +// not to use CBC algorithms for decryption without first ensuring the integrity of +// the ciphertext using an HMAC, for example. See https://docs.microsoft.com/dotnet/standard/security/vulnerabilities-cbc-mode +// for more information. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the key. +// - version - The version of the key. +// - parameters - The parameters for the decryption operation. +// - options - DecryptOptions contains the optional parameters for the Client.Decrypt method. +func (client *Client) Decrypt(ctx context.Context, name string, version string, parameters KeyOperationParameters, options *DecryptOptions) (DecryptResponse, error) { + req, err := client.decryptCreateRequest(ctx, name, version, parameters, options) + if err != nil { + return DecryptResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return DecryptResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return DecryptResponse{}, runtime.NewResponseError(resp) + } + return client.decryptHandleResponse(resp) +} + +// decryptCreateRequest creates the Decrypt request. 
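As a usage sketch for the CreateKey signature above (not part of the generated file): the key name, key size, and the pre-built client are assumptions, `to.Ptr` comes from `sdk/azcore/to`, and the `CreateKeyParameters` field names follow the generated `models.go`, which this hunk does not include.

```go
package example

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys"
)

// createExampleKey creates "example-key" (or a new version of it) as a 2048-bit RSA key.
func createExampleKey(ctx context.Context, client *azkeys.Client) {
	params := azkeys.CreateKeyParameters{
		Kty:     to.Ptr(azkeys.KeyTypeRSA), // key type constants are generated in constants.go
		KeySize: to.Ptr(int32(2048)),
	}
	resp, err := client.CreateKey(ctx, "example-key", params, nil)
	if err != nil {
		log.Fatal(err) // on failure this is an *azcore.ResponseError
	}
	fmt.Println("created key:", *resp.Key.KID)
}
```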
+func (client *Client) decryptCreateRequest(ctx context.Context, name string, version string, parameters KeyOperationParameters, options *DecryptOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/{key-version}/decrypt" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + urlPath = strings.ReplaceAll(urlPath, "{key-version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// decryptHandleResponse handles the Decrypt response. +func (client *Client) decryptHandleResponse(resp *http.Response) (DecryptResponse, error) { + result := DecryptResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyOperationResult); err != nil { + return DecryptResponse{}, err + } + return result, nil +} + +// DeleteKey - The delete key operation cannot be used to remove individual versions of a key. This operation removes the +// cryptographic material associated with the key, which means the key is not usable for +// Sign/Verify, Wrap/Unwrap or Encrypt/Decrypt operations. This operation requires the keys/delete permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the key to delete. +// - options - DeleteKeyOptions contains the optional parameters for the Client.DeleteKey method. +func (client *Client) DeleteKey(ctx context.Context, name string, options *DeleteKeyOptions) (DeleteKeyResponse, error) { + req, err := client.deleteKeyCreateRequest(ctx, name, options) + if err != nil { + return DeleteKeyResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return DeleteKeyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return DeleteKeyResponse{}, runtime.NewResponseError(resp) + } + return client.deleteKeyHandleResponse(resp) +} + +// deleteKeyCreateRequest creates the DeleteKey request. +func (client *Client) deleteKeyCreateRequest(ctx context.Context, name string, options *DeleteKeyOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// deleteKeyHandleResponse handles the DeleteKey response. +func (client *Client) deleteKeyHandleResponse(resp *http.Response) (DeleteKeyResponse, error) { + result := DeleteKeyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.DeletedKey); err != nil { + return DeleteKeyResponse{}, err + } + return result, nil +} + +// Encrypt - The ENCRYPT operation encrypts an arbitrary sequence of bytes using an encryption key that is stored in Azure +// Key Vault. 
Note that the ENCRYPT operation only supports a single block of data, the size +// of which is dependent on the target key and the encryption algorithm to be used. The ENCRYPT operation is only strictly +// necessary for symmetric keys stored in Azure Key Vault since protection with an +// asymmetric key can be performed using public portion of the key. This operation is supported for asymmetric keys as a convenience +// for callers that have a key-reference but do not have access to the +// public key material. This operation requires the keys/encrypt permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the key. +// - version - The version of the key. +// - parameters - The parameters for the encryption operation. +// - options - EncryptOptions contains the optional parameters for the Client.Encrypt method. +func (client *Client) Encrypt(ctx context.Context, name string, version string, parameters KeyOperationParameters, options *EncryptOptions) (EncryptResponse, error) { + req, err := client.encryptCreateRequest(ctx, name, version, parameters, options) + if err != nil { + return EncryptResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return EncryptResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return EncryptResponse{}, runtime.NewResponseError(resp) + } + return client.encryptHandleResponse(resp) +} + +// encryptCreateRequest creates the Encrypt request. +func (client *Client) encryptCreateRequest(ctx context.Context, name string, version string, parameters KeyOperationParameters, options *EncryptOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/{key-version}/encrypt" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + urlPath = strings.ReplaceAll(urlPath, "{key-version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// encryptHandleResponse handles the Encrypt response. +func (client *Client) encryptHandleResponse(resp *http.Response) (EncryptResponse, error) { + result := EncryptResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyOperationResult); err != nil { + return EncryptResponse{}, err + } + return result, nil +} + +// GetDeletedKey - The Get Deleted Key operation is applicable for soft-delete enabled vaults. While the operation can be +// invoked on any vault, it will return an error if invoked on a non soft-delete enabled vault. This +// operation requires the keys/get permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the key. +// - options - GetDeletedKeyOptions contains the optional parameters for the Client.GetDeletedKey method. 
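A hedged round-trip sketch for the Encrypt and Decrypt methods above, assuming the key created earlier and the `KeyOperationParameters`/`KeyOperationResult` field names from the generated models (not shown in this hunk); an empty version string targets the latest key version.

```go
package example

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys"
)

// encryptDecryptRoundTrip encrypts a small payload with RSA-OAEP-256, then decrypts it again.
func encryptDecryptRoundTrip(ctx context.Context, client *azkeys.Client) {
	params := azkeys.KeyOperationParameters{
		Algorithm: to.Ptr(azkeys.EncryptionAlgorithmRSAOAEP256),
		Value:     []byte("plaintext"),
	}
	encResp, err := client.Encrypt(ctx, "example-key", "", params, nil)
	if err != nil {
		log.Fatal(err)
	}

	params.Value = encResp.Result // ciphertext comes back in KeyOperationResult.Result
	decResp, err := client.Decrypt(ctx, "example-key", "", params, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("decrypted: %s\n", decResp.Result)
}
```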
+func (client *Client) GetDeletedKey(ctx context.Context, name string, options *GetDeletedKeyOptions) (GetDeletedKeyResponse, error) { + req, err := client.getDeletedKeyCreateRequest(ctx, name, options) + if err != nil { + return GetDeletedKeyResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return GetDeletedKeyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return GetDeletedKeyResponse{}, runtime.NewResponseError(resp) + } + return client.getDeletedKeyHandleResponse(resp) +} + +// getDeletedKeyCreateRequest creates the GetDeletedKey request. +func (client *Client) getDeletedKeyCreateRequest(ctx context.Context, name string, options *GetDeletedKeyOptions) (*policy.Request, error) { + urlPath := "/deletedkeys/{key-name}" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getDeletedKeyHandleResponse handles the GetDeletedKey response. +func (client *Client) getDeletedKeyHandleResponse(resp *http.Response) (GetDeletedKeyResponse, error) { + result := GetDeletedKeyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.DeletedKey); err != nil { + return GetDeletedKeyResponse{}, err + } + return result, nil +} + +// GetKey - The get key operation is applicable to all key types. If the requested key is symmetric, then no key material +// is released in the response. This operation requires the keys/get permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the key to get. +// - version - Adding the version parameter retrieves a specific version of a key. This URI fragment is optional. If not specified, +// the latest version of the key is returned. +// - options - GetKeyOptions contains the optional parameters for the Client.GetKey method. +func (client *Client) GetKey(ctx context.Context, name string, version string, options *GetKeyOptions) (GetKeyResponse, error) { + req, err := client.getKeyCreateRequest(ctx, name, version, options) + if err != nil { + return GetKeyResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return GetKeyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return GetKeyResponse{}, runtime.NewResponseError(resp) + } + return client.getKeyHandleResponse(resp) +} + +// getKeyCreateRequest creates the GetKey request. 
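A short sketch of GetKey, relying on the behavior documented above and on the autorest directive earlier in this patch that makes an empty version legal (it resolves to the latest version); the key name is an assumption.

```go
package example

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys"
)

// getLatestKey reads the latest version of "example-key" and prints its identifier and type.
func getLatestKey(ctx context.Context, client *azkeys.Client) {
	resp, err := client.GetKey(ctx, "example-key", "", nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("key ID:", *resp.Key.KID, "type:", *resp.Key.Kty)
}
```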
+func (client *Client) getKeyCreateRequest(ctx context.Context, name string, version string, options *GetKeyOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/{key-version}" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + urlPath = strings.ReplaceAll(urlPath, "{key-version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getKeyHandleResponse handles the GetKey response. +func (client *Client) getKeyHandleResponse(resp *http.Response) (GetKeyResponse, error) { + result := GetKeyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyBundle); err != nil { + return GetKeyResponse{}, err + } + return result, nil +} + +// GetKeyRotationPolicy - The GetKeyRotationPolicy operation returns the specified key policy resources in the specified key +// vault. This operation requires the keys/get permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the key in a given key vault. +// - options - GetKeyRotationPolicyOptions contains the optional parameters for the Client.GetKeyRotationPolicy method. +func (client *Client) GetKeyRotationPolicy(ctx context.Context, name string, options *GetKeyRotationPolicyOptions) (GetKeyRotationPolicyResponse, error) { + req, err := client.getKeyRotationPolicyCreateRequest(ctx, name, options) + if err != nil { + return GetKeyRotationPolicyResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return GetKeyRotationPolicyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return GetKeyRotationPolicyResponse{}, runtime.NewResponseError(resp) + } + return client.getKeyRotationPolicyHandleResponse(resp) +} + +// getKeyRotationPolicyCreateRequest creates the GetKeyRotationPolicy request. +func (client *Client) getKeyRotationPolicyCreateRequest(ctx context.Context, name string, options *GetKeyRotationPolicyOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/rotationpolicy" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getKeyRotationPolicyHandleResponse handles the GetKeyRotationPolicy response. +func (client *Client) getKeyRotationPolicyHandleResponse(resp *http.Response) (GetKeyRotationPolicyResponse, error) { + result := GetKeyRotationPolicyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyRotationPolicy); err != nil { + return GetKeyRotationPolicyResponse{}, err + } + return result, nil +} + +// GetRandomBytes - Get the requested number of bytes containing random values from a managed HSM. +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 7.4 +// - parameters - The request object to get random bytes. +// - options - GetRandomBytesOptions contains the optional parameters for the Client.GetRandomBytes method. +func (client *Client) GetRandomBytes(ctx context.Context, parameters GetRandomBytesParameters, options *GetRandomBytesOptions) (GetRandomBytesResponse, error) { + req, err := client.getRandomBytesCreateRequest(ctx, parameters, options) + if err != nil { + return GetRandomBytesResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return GetRandomBytesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return GetRandomBytesResponse{}, runtime.NewResponseError(resp) + } + return client.getRandomBytesHandleResponse(resp) +} + +// getRandomBytesCreateRequest creates the GetRandomBytes request. +func (client *Client) getRandomBytesCreateRequest(ctx context.Context, parameters GetRandomBytesParameters, options *GetRandomBytesOptions) (*policy.Request, error) { + urlPath := "/rng" + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// getRandomBytesHandleResponse handles the GetRandomBytes response. +func (client *Client) getRandomBytesHandleResponse(resp *http.Response) (GetRandomBytesResponse, error) { + result := GetRandomBytesResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.RandomBytes); err != nil { + return GetRandomBytesResponse{}, err + } + return result, nil +} + +// ImportKey - The import key operation may be used to import any key type into an Azure Key Vault. If the named key already +// exists, Azure Key Vault creates a new version of the key. This operation requires the +// keys/import permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - Name for the imported key. The value you provide may be copied globally for the purpose of running the service. +// The value provided should not include personally identifiable or sensitive information. +// - parameters - The parameters to import a key. +// - options - ImportKeyOptions contains the optional parameters for the Client.ImportKey method. +func (client *Client) ImportKey(ctx context.Context, name string, parameters ImportKeyParameters, options *ImportKeyOptions) (ImportKeyResponse, error) { + req, err := client.importKeyCreateRequest(ctx, name, parameters, options) + if err != nil { + return ImportKeyResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ImportKeyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ImportKeyResponse{}, runtime.NewResponseError(resp) + } + return client.importKeyHandleResponse(resp) +} + +// importKeyCreateRequest creates the ImportKey request. 
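A sketch of GetRandomBytes under two assumptions not visible in this hunk: the `GetRandomBytesParameters.Count` and response `Value` field names come from the generated models, and the client points at a Managed HSM endpoint, since the /rng path is not served by a standard vault.

```go
package example

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys"
)

// fetchRandomBytes requests 32 random bytes from a Managed HSM.
func fetchRandomBytes(ctx context.Context, client *azkeys.Client) {
	resp, err := client.GetRandomBytes(ctx, azkeys.GetRandomBytesParameters{Count: to.Ptr(int32(32))}, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("received %d random bytes\n", len(resp.Value))
}
```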
+func (client *Client) importKeyCreateRequest(ctx context.Context, name string, parameters ImportKeyParameters, options *ImportKeyOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// importKeyHandleResponse handles the ImportKey response. +func (client *Client) importKeyHandleResponse(resp *http.Response) (ImportKeyResponse, error) { + result := ImportKeyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyBundle); err != nil { + return ImportKeyResponse{}, err + } + return result, nil +} + +// NewListDeletedKeyPropertiesPager - Retrieves a list of the keys in the Key Vault as JSON Web Key structures that contain +// the public part of a deleted key. This operation includes deletion-specific information. The Get Deleted Keys +// operation is applicable for vaults enabled for soft-delete. While the operation can be invoked on any vault, it will return +// an error if invoked on a non soft-delete enabled vault. This operation +// requires the keys/list permission. +// +// Generated from API version 7.4 +// - options - ListDeletedKeyPropertiesOptions contains the optional parameters for the Client.NewListDeletedKeyPropertiesPager +// method. +func (client *Client) NewListDeletedKeyPropertiesPager(options *ListDeletedKeyPropertiesOptions) *runtime.Pager[ListDeletedKeyPropertiesResponse] { + return runtime.NewPager(runtime.PagingHandler[ListDeletedKeyPropertiesResponse]{ + More: func(page ListDeletedKeyPropertiesResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *ListDeletedKeyPropertiesResponse) (ListDeletedKeyPropertiesResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listDeletedKeyPropertiesCreateRequest(ctx, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return ListDeletedKeyPropertiesResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ListDeletedKeyPropertiesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ListDeletedKeyPropertiesResponse{}, runtime.NewResponseError(resp) + } + return client.listDeletedKeyPropertiesHandleResponse(resp) + }, + }) +} + +// listDeletedKeyPropertiesCreateRequest creates the ListDeletedKeyProperties request. +func (client *Client) listDeletedKeyPropertiesCreateRequest(ctx context.Context, options *ListDeletedKeyPropertiesOptions) (*policy.Request, error) { + urlPath := "/deletedkeys" + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listDeletedKeyPropertiesHandleResponse handles the ListDeletedKeyProperties response. 
+func (client *Client) listDeletedKeyPropertiesHandleResponse(resp *http.Response) (ListDeletedKeyPropertiesResponse, error) { + result := ListDeletedKeyPropertiesResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.DeletedKeyPropertiesListResult); err != nil { + return ListDeletedKeyPropertiesResponse{}, err + } + return result, nil +} + +// NewListKeyPropertiesPager - Retrieves a list of the keys in the Key Vault as JSON Web Key structures that contain the public +// part of a stored key. The LIST operation is applicable to all key types, however only the base key +// identifier, attributes, and tags are provided in the response. Individual versions of a key are not listed in the response. +// This operation requires the keys/list permission. +// +// Generated from API version 7.4 +// - options - ListKeyPropertiesOptions contains the optional parameters for the Client.NewListKeyPropertiesPager method. +func (client *Client) NewListKeyPropertiesPager(options *ListKeyPropertiesOptions) *runtime.Pager[ListKeyPropertiesResponse] { + return runtime.NewPager(runtime.PagingHandler[ListKeyPropertiesResponse]{ + More: func(page ListKeyPropertiesResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *ListKeyPropertiesResponse) (ListKeyPropertiesResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listKeyPropertiesCreateRequest(ctx, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return ListKeyPropertiesResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ListKeyPropertiesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ListKeyPropertiesResponse{}, runtime.NewResponseError(resp) + } + return client.listKeyPropertiesHandleResponse(resp) + }, + }) +} + +// listKeyPropertiesCreateRequest creates the ListKeyProperties request. +func (client *Client) listKeyPropertiesCreateRequest(ctx context.Context, options *ListKeyPropertiesOptions) (*policy.Request, error) { + urlPath := "/keys" + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listKeyPropertiesHandleResponse handles the ListKeyProperties response. +func (client *Client) listKeyPropertiesHandleResponse(resp *http.Response) (ListKeyPropertiesResponse, error) { + result := ListKeyPropertiesResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyPropertiesListResult); err != nil { + return ListKeyPropertiesResponse{}, err + } + return result, nil +} + +// NewListKeyPropertiesVersionsPager - The full key identifier, attributes, and tags are provided in the response. This operation +// requires the keys/list permission. +// +// Generated from API version 7.4 +// - name - The name of the key. +// - options - ListKeyPropertiesVersionsOptions contains the optional parameters for the Client.NewListKeyPropertiesVersionsPager +// method. 
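The pagers above wrap the generic `runtime.Pager`; a minimal iteration sketch, assuming the `Value` and `KID` field names from the generated models:

```go
package example

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys"
)

// listKeys walks every page returned by NewListKeyPropertiesPager and prints each key ID.
func listKeys(ctx context.Context, client *azkeys.Client) {
	pager := client.NewListKeyPropertiesPager(nil)
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			log.Fatal(err)
		}
		for _, key := range page.Value {
			fmt.Println(*key.KID)
		}
	}
}
```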
+func (client *Client) NewListKeyPropertiesVersionsPager(name string, options *ListKeyPropertiesVersionsOptions) *runtime.Pager[ListKeyPropertiesVersionsResponse] { + return runtime.NewPager(runtime.PagingHandler[ListKeyPropertiesVersionsResponse]{ + More: func(page ListKeyPropertiesVersionsResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *ListKeyPropertiesVersionsResponse) (ListKeyPropertiesVersionsResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listKeyPropertiesVersionsCreateRequest(ctx, name, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return ListKeyPropertiesVersionsResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ListKeyPropertiesVersionsResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ListKeyPropertiesVersionsResponse{}, runtime.NewResponseError(resp) + } + return client.listKeyPropertiesVersionsHandleResponse(resp) + }, + }) +} + +// listKeyPropertiesVersionsCreateRequest creates the ListKeyPropertiesVersions request. +func (client *Client) listKeyPropertiesVersionsCreateRequest(ctx context.Context, name string, options *ListKeyPropertiesVersionsOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/versions" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listKeyPropertiesVersionsHandleResponse handles the ListKeyPropertiesVersions response. +func (client *Client) listKeyPropertiesVersionsHandleResponse(resp *http.Response) (ListKeyPropertiesVersionsResponse, error) { + result := ListKeyPropertiesVersionsResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyPropertiesListResult); err != nil { + return ListKeyPropertiesVersionsResponse{}, err + } + return result, nil +} + +// PurgeDeletedKey - The Purge Deleted Key operation is applicable for soft-delete enabled vaults. While the operation can +// be invoked on any vault, it will return an error if invoked on a non soft-delete enabled vault. +// This operation requires the keys/purge permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the key +// - options - PurgeDeletedKeyOptions contains the optional parameters for the Client.PurgeDeletedKey method. +func (client *Client) PurgeDeletedKey(ctx context.Context, name string, options *PurgeDeletedKeyOptions) (PurgeDeletedKeyResponse, error) { + req, err := client.purgeDeletedKeyCreateRequest(ctx, name, options) + if err != nil { + return PurgeDeletedKeyResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PurgeDeletedKeyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusNoContent) { + return PurgeDeletedKeyResponse{}, runtime.NewResponseError(resp) + } + return PurgeDeletedKeyResponse{}, nil +} + +// purgeDeletedKeyCreateRequest creates the PurgeDeletedKey request. 
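A soft-delete lifecycle sketch built from the DeleteKey and PurgeDeletedKey methods above; the key name is an assumption, and purging presumes a soft-delete enabled vault, the keys/purge permission, and that the deletion has finished server-side.

```go
package example

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys"
)

// deleteAndPurge deletes "example-key" and then purges it permanently.
func deleteAndPurge(ctx context.Context, client *azkeys.Client) {
	if _, err := client.DeleteKey(ctx, "example-key", nil); err != nil {
		log.Fatal(err)
	}
	// RecoverDeletedKey could be called here instead to undo the deletion.
	if _, err := client.PurgeDeletedKey(ctx, "example-key", nil); err != nil {
		log.Fatal(err)
	}
}
```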
+func (client *Client) purgeDeletedKeyCreateRequest(ctx context.Context, name string, options *PurgeDeletedKeyOptions) (*policy.Request, error) { + urlPath := "/deletedkeys/{key-name}" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// RecoverDeletedKey - The Recover Deleted Key operation is applicable for deleted keys in soft-delete enabled vaults. It +// recovers the deleted key back to its latest version under /keys. An attempt to recover an non-deleted +// key will return an error. Consider this the inverse of the delete operation on soft-delete enabled vaults. This operation +// requires the keys/recover permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the deleted key. +// - options - RecoverDeletedKeyOptions contains the optional parameters for the Client.RecoverDeletedKey method. +func (client *Client) RecoverDeletedKey(ctx context.Context, name string, options *RecoverDeletedKeyOptions) (RecoverDeletedKeyResponse, error) { + req, err := client.recoverDeletedKeyCreateRequest(ctx, name, options) + if err != nil { + return RecoverDeletedKeyResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RecoverDeletedKeyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RecoverDeletedKeyResponse{}, runtime.NewResponseError(resp) + } + return client.recoverDeletedKeyHandleResponse(resp) +} + +// recoverDeletedKeyCreateRequest creates the RecoverDeletedKey request. +func (client *Client) recoverDeletedKeyCreateRequest(ctx context.Context, name string, options *RecoverDeletedKeyOptions) (*policy.Request, error) { + urlPath := "/deletedkeys/{key-name}/recover" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// recoverDeletedKeyHandleResponse handles the RecoverDeletedKey response. +func (client *Client) recoverDeletedKeyHandleResponse(resp *http.Response) (RecoverDeletedKeyResponse, error) { + result := RecoverDeletedKeyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyBundle); err != nil { + return RecoverDeletedKeyResponse{}, err + } + return result, nil +} + +// Release - The release key operation is applicable to all key types. The target key must be marked exportable. This operation +// requires the keys/release permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the key to get. +// - version - Adding the version parameter retrieves a specific version of a key. +// - parameters - The parameters for the key release operation. 
+// - options - ReleaseOptions contains the optional parameters for the Client.Release method. +func (client *Client) Release(ctx context.Context, name string, version string, parameters ReleaseParameters, options *ReleaseOptions) (ReleaseResponse, error) { + req, err := client.releaseCreateRequest(ctx, name, version, parameters, options) + if err != nil { + return ReleaseResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ReleaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ReleaseResponse{}, runtime.NewResponseError(resp) + } + return client.releaseHandleResponse(resp) +} + +// releaseCreateRequest creates the Release request. +func (client *Client) releaseCreateRequest(ctx context.Context, name string, version string, parameters ReleaseParameters, options *ReleaseOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/{key-version}/release" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + urlPath = strings.ReplaceAll(urlPath, "{key-version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// releaseHandleResponse handles the Release response. +func (client *Client) releaseHandleResponse(resp *http.Response) (ReleaseResponse, error) { + result := ReleaseResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyReleaseResult); err != nil { + return ReleaseResponse{}, err + } + return result, nil +} + +// RestoreKey - Imports a previously backed up key into Azure Key Vault, restoring the key, its key identifier, attributes +// and access control policies. The RESTORE operation may be used to import a previously backed +// up key. Individual versions of a key cannot be restored. The key is restored in its entirety with the same key name as +// it had when it was backed up. If the key name is not available in the target Key +// Vault, the RESTORE operation will be rejected. While the key name is retained during restore, the final key identifier +// will change if the key is restored to a different vault. Restore will restore all +// versions and preserve version identifiers. The RESTORE operation is subject to security constraints: The target Key Vault +// must be owned by the same Microsoft Azure Subscription as the source Key Vault +// The user must have RESTORE permission in the target Key Vault. This operation requires the keys/restore permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - parameters - The parameters to restore the key. +// - options - RestoreKeyOptions contains the optional parameters for the Client.RestoreKey method. 
+func (client *Client) RestoreKey(ctx context.Context, parameters RestoreKeyParameters, options *RestoreKeyOptions) (RestoreKeyResponse, error) { + req, err := client.restoreKeyCreateRequest(ctx, parameters, options) + if err != nil { + return RestoreKeyResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RestoreKeyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RestoreKeyResponse{}, runtime.NewResponseError(resp) + } + return client.restoreKeyHandleResponse(resp) +} + +// restoreKeyCreateRequest creates the RestoreKey request. +func (client *Client) restoreKeyCreateRequest(ctx context.Context, parameters RestoreKeyParameters, options *RestoreKeyOptions) (*policy.Request, error) { + urlPath := "/keys/restore" + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// restoreKeyHandleResponse handles the RestoreKey response. +func (client *Client) restoreKeyHandleResponse(resp *http.Response) (RestoreKeyResponse, error) { + result := RestoreKeyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyBundle); err != nil { + return RestoreKeyResponse{}, err + } + return result, nil +} + +// RotateKey - The operation will rotate the key based on the key policy. It requires the keys/rotate permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of key to be rotated. The system will generate a new version in the specified key. +// - options - RotateKeyOptions contains the optional parameters for the Client.RotateKey method. +func (client *Client) RotateKey(ctx context.Context, name string, options *RotateKeyOptions) (RotateKeyResponse, error) { + req, err := client.rotateKeyCreateRequest(ctx, name, options) + if err != nil { + return RotateKeyResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RotateKeyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RotateKeyResponse{}, runtime.NewResponseError(resp) + } + return client.rotateKeyHandleResponse(resp) +} + +// rotateKeyCreateRequest creates the RotateKey request. +func (client *Client) rotateKeyCreateRequest(ctx context.Context, name string, options *RotateKeyOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/rotate" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// rotateKeyHandleResponse handles the RotateKey response. 
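A backup/restore sketch for RestoreKey above and the BackupKey method earlier in this file. The `Value` and `KeyBackup` field names follow the generated models and the `KeyBackup` rename directive in autorest.md; in practice the restore targets a different vault in the same geography, since restoring into the source vault fails while the key still exists there.

```go
package example

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys"
)

// backupAndRestore exports the protected backup blob of "example-key" and feeds it to RestoreKey.
func backupAndRestore(ctx context.Context, client *azkeys.Client) {
	backup, err := client.BackupKey(ctx, "example-key", nil)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := client.RestoreKey(ctx, azkeys.RestoreKeyParameters{KeyBackup: backup.Value}, nil); err != nil {
		log.Fatal(err)
	}
}
```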
+func (client *Client) rotateKeyHandleResponse(resp *http.Response) (RotateKeyResponse, error) { + result := RotateKeyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyBundle); err != nil { + return RotateKeyResponse{}, err + } + return result, nil +} + +// Sign - The SIGN operation is applicable to asymmetric and symmetric keys stored in Azure Key Vault since this operation +// uses the private portion of the key. This operation requires the keys/sign permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the key. +// - version - The version of the key. +// - parameters - The parameters for the signing operation. +// - options - SignOptions contains the optional parameters for the Client.Sign method. +func (client *Client) Sign(ctx context.Context, name string, version string, parameters SignParameters, options *SignOptions) (SignResponse, error) { + req, err := client.signCreateRequest(ctx, name, version, parameters, options) + if err != nil { + return SignResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return SignResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return SignResponse{}, runtime.NewResponseError(resp) + } + return client.signHandleResponse(resp) +} + +// signCreateRequest creates the Sign request. +func (client *Client) signCreateRequest(ctx context.Context, name string, version string, parameters SignParameters, options *SignOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/{key-version}/sign" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + urlPath = strings.ReplaceAll(urlPath, "{key-version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// signHandleResponse handles the Sign response. +func (client *Client) signHandleResponse(resp *http.Response) (SignResponse, error) { + result := SignResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyOperationResult); err != nil { + return SignResponse{}, err + } + return result, nil +} + +// UnwrapKey - The UNWRAP operation supports decryption of a symmetric key using the target key encryption key. This operation +// is the reverse of the WRAP operation. The UNWRAP operation applies to asymmetric and +// symmetric keys stored in Azure Key Vault since it uses the private portion of the key. This operation requires the keys/unwrapKey +// permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the key. +// - version - The version of the key. +// - parameters - The parameters for the key operation. +// - options - UnwrapKeyOptions contains the optional parameters for the Client.UnwrapKey method. 
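A signing sketch for the Sign method above, assuming an RSA key and the `SignParameters`/`KeyOperationResult` field names from the generated models; the returned signature could afterwards be checked with Client.Verify.

```go
package example

import (
	"context"
	"crypto/sha256"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys"
)

// signDigest signs a SHA-256 digest with the latest version of "example-key".
func signDigest(ctx context.Context, client *azkeys.Client) {
	digest := sha256.Sum256([]byte("message to sign"))
	resp, err := client.Sign(ctx, "example-key", "", azkeys.SignParameters{
		Algorithm: to.Ptr(azkeys.SignatureAlgorithmRS256),
		Value:     digest[:],
	}, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("signature is %d bytes\n", len(resp.Result))
}
```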
+func (client *Client) UnwrapKey(ctx context.Context, name string, version string, parameters KeyOperationParameters, options *UnwrapKeyOptions) (UnwrapKeyResponse, error) { + req, err := client.unwrapKeyCreateRequest(ctx, name, version, parameters, options) + if err != nil { + return UnwrapKeyResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return UnwrapKeyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return UnwrapKeyResponse{}, runtime.NewResponseError(resp) + } + return client.unwrapKeyHandleResponse(resp) +} + +// unwrapKeyCreateRequest creates the UnwrapKey request. +func (client *Client) unwrapKeyCreateRequest(ctx context.Context, name string, version string, parameters KeyOperationParameters, options *UnwrapKeyOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/{key-version}/unwrapkey" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + urlPath = strings.ReplaceAll(urlPath, "{key-version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// unwrapKeyHandleResponse handles the UnwrapKey response. +func (client *Client) unwrapKeyHandleResponse(resp *http.Response) (UnwrapKeyResponse, error) { + result := UnwrapKeyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyOperationResult); err != nil { + return UnwrapKeyResponse{}, err + } + return result, nil +} + +// UpdateKey - In order to perform this operation, the key must already exist in the Key Vault. Note: The cryptographic material +// of a key itself cannot be changed. This operation requires the keys/update permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of key to update. +// - version - The version of the key to update. +// - parameters - The parameters of the key to update. +// - options - UpdateKeyOptions contains the optional parameters for the Client.UpdateKey method. +func (client *Client) UpdateKey(ctx context.Context, name string, version string, parameters UpdateKeyParameters, options *UpdateKeyOptions) (UpdateKeyResponse, error) { + req, err := client.updateKeyCreateRequest(ctx, name, version, parameters, options) + if err != nil { + return UpdateKeyResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return UpdateKeyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return UpdateKeyResponse{}, runtime.NewResponseError(resp) + } + return client.updateKeyHandleResponse(resp) +} + +// updateKeyCreateRequest creates the UpdateKey request. 
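A sketch of UpdateKey used to disable a key, which exercises the point above that only metadata, not key material, can change. The `UpdateKeyParameters.KeyAttributes` and `KeyAttributes.Enabled` field names are assumptions taken from the generated models, which this hunk does not include.

```go
package example

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys"
)

// disableKey marks the latest version of "example-key" as disabled.
func disableKey(ctx context.Context, client *azkeys.Client) {
	params := azkeys.UpdateKeyParameters{
		KeyAttributes: &azkeys.KeyAttributes{Enabled: to.Ptr(false)},
	}
	if _, err := client.UpdateKey(ctx, "example-key", "", params, nil); err != nil {
		log.Fatal(err)
	}
}
```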
+func (client *Client) updateKeyCreateRequest(ctx context.Context, name string, version string, parameters UpdateKeyParameters, options *UpdateKeyOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/{key-version}" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + urlPath = strings.ReplaceAll(urlPath, "{key-version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// updateKeyHandleResponse handles the UpdateKey response. +func (client *Client) updateKeyHandleResponse(resp *http.Response) (UpdateKeyResponse, error) { + result := UpdateKeyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyBundle); err != nil { + return UpdateKeyResponse{}, err + } + return result, nil +} + +// UpdateKeyRotationPolicy - Set specified members in the key policy. Leave others as undefined. This operation requires the +// keys/update permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the key in the given vault. +// - keyRotationPolicy - The policy for the key. +// - options - UpdateKeyRotationPolicyOptions contains the optional parameters for the Client.UpdateKeyRotationPolicy +// method. +func (client *Client) UpdateKeyRotationPolicy(ctx context.Context, name string, keyRotationPolicy KeyRotationPolicy, options *UpdateKeyRotationPolicyOptions) (UpdateKeyRotationPolicyResponse, error) { + req, err := client.updateKeyRotationPolicyCreateRequest(ctx, name, keyRotationPolicy, options) + if err != nil { + return UpdateKeyRotationPolicyResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return UpdateKeyRotationPolicyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return UpdateKeyRotationPolicyResponse{}, runtime.NewResponseError(resp) + } + return client.updateKeyRotationPolicyHandleResponse(resp) +} + +// updateKeyRotationPolicyCreateRequest creates the UpdateKeyRotationPolicy request. +func (client *Client) updateKeyRotationPolicyCreateRequest(ctx context.Context, name string, keyRotationPolicy KeyRotationPolicy, options *UpdateKeyRotationPolicyOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/rotationpolicy" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, keyRotationPolicy) +} + +// updateKeyRotationPolicyHandleResponse handles the UpdateKeyRotationPolicy response. 
+func (client *Client) updateKeyRotationPolicyHandleResponse(resp *http.Response) (UpdateKeyRotationPolicyResponse, error) { + result := UpdateKeyRotationPolicyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyRotationPolicy); err != nil { + return UpdateKeyRotationPolicyResponse{}, err + } + return result, nil +} + +// Verify - The VERIFY operation is applicable to symmetric keys stored in Azure Key Vault. VERIFY is not strictly necessary +// for asymmetric keys stored in Azure Key Vault since signature verification can be +// performed using the public portion of the key but this operation is supported as a convenience for callers that only have +// a key-reference and not the public portion of the key. This operation requires +// the keys/verify permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.4 +// - name - The name of the key. +// - version - The version of the key. +// - parameters - The parameters for verify operations. +// - options - VerifyOptions contains the optional parameters for the Client.Verify method. +func (client *Client) Verify(ctx context.Context, name string, version string, parameters VerifyParameters, options *VerifyOptions) (VerifyResponse, error) { + req, err := client.verifyCreateRequest(ctx, name, version, parameters, options) + if err != nil { + return VerifyResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return VerifyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VerifyResponse{}, runtime.NewResponseError(resp) + } + return client.verifyHandleResponse(resp) +} + +// verifyCreateRequest creates the Verify request. +func (client *Client) verifyCreateRequest(ctx context.Context, name string, version string, parameters VerifyParameters, options *VerifyOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/{key-version}/verify" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + urlPath = strings.ReplaceAll(urlPath, "{key-version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// verifyHandleResponse handles the Verify response. +func (client *Client) verifyHandleResponse(resp *http.Response) (VerifyResponse, error) { + result := VerifyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyVerifyResult); err != nil { + return VerifyResponse{}, err + } + return result, nil +} + +// WrapKey - The WRAP operation supports encryption of a symmetric key using a key encryption key that has previously been +// stored in an Azure Key Vault. The WRAP operation is only strictly necessary for symmetric +// keys stored in Azure Key Vault since protection with an asymmetric key can be performed using the public portion of the +// key. This operation is supported for asymmetric keys as a convenience for +// callers that have a key-reference but do not have access to the public key material. This operation requires the keys/wrapKey +// permission. +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 7.4 +// - name - The name of the key. +// - version - The version of the key. +// - parameters - The parameters for wrap operation. +// - options - WrapKeyOptions contains the optional parameters for the Client.WrapKey method. +func (client *Client) WrapKey(ctx context.Context, name string, version string, parameters KeyOperationParameters, options *WrapKeyOptions) (WrapKeyResponse, error) { + req, err := client.wrapKeyCreateRequest(ctx, name, version, parameters, options) + if err != nil { + return WrapKeyResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return WrapKeyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return WrapKeyResponse{}, runtime.NewResponseError(resp) + } + return client.wrapKeyHandleResponse(resp) +} + +// wrapKeyCreateRequest creates the WrapKey request. +func (client *Client) wrapKeyCreateRequest(ctx context.Context, name string, version string, parameters KeyOperationParameters, options *WrapKeyOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/{key-version}/wrapkey" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + urlPath = strings.ReplaceAll(urlPath, "{key-version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.4") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// wrapKeyHandleResponse handles the WrapKey response. +func (client *Client) wrapKeyHandleResponse(resp *http.Response) (WrapKeyResponse, error) { + result := WrapKeyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyOperationResult); err != nil { + return WrapKeyResponse{}, err + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/constants.go new file mode 100644 index 00000000..64960a40 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/constants.go @@ -0,0 +1,214 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package azkeys + +// CurveName - Elliptic curve name. For valid values, see JsonWebKeyCurveName. +type CurveName string + +const ( + // CurveNameP256 - The NIST P-256 elliptic curve, AKA SECG curve SECP256R1. + CurveNameP256 CurveName = "P-256" + // CurveNameP256K - The SECG SECP256K1 elliptic curve. + CurveNameP256K CurveName = "P-256K" + // CurveNameP384 - The NIST P-384 elliptic curve, AKA SECG curve SECP384R1. + CurveNameP384 CurveName = "P-384" + // CurveNameP521 - The NIST P-521 elliptic curve, AKA SECG curve SECP521R1. + CurveNameP521 CurveName = "P-521" +) + +// PossibleCurveNameValues returns the possible values for the CurveName const type. 
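A short sketch of calling the WrapKey method above, assuming an existing *azkeys.Client and placeholder key name/version (package clause omitted): it wraps a locally generated 256-bit key with RSA-OAEP-256 and reads the wrapped bytes out of the KeyOperationResult that wrapKeyHandleResponse populates. Unwrapping is the mirror-image UNWRAP operation (see KeyOperationUnwrapKey below) and is not shown.

import (
	"context"
	"crypto/rand"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys"
)

// wrapLocalAESKey generates a 256-bit symmetric key locally and protects it with the
// Key Vault key identified by name/version, using RSA-OAEP-256. The wrapped bytes can
// be stored alongside ciphertext and later recovered with the matching unwrap call.
func wrapLocalAESKey(ctx context.Context, client *azkeys.Client, name, version string) (clear, wrapped []byte, err error) {
	clear = make([]byte, 32)
	if _, err = rand.Read(clear); err != nil {
		return nil, nil, err
	}
	params := azkeys.KeyOperationParameters{
		Algorithm: to.Ptr(azkeys.EncryptionAlgorithmRSAOAEP256),
		Value:     clear,
	}
	resp, err := client.WrapKey(ctx, name, version, params, nil)
	if err != nil {
		return nil, nil, err
	}
	return clear, resp.KeyOperationResult.Result, nil
}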
+func PossibleCurveNameValues() []CurveName { + return []CurveName{ + CurveNameP256, + CurveNameP256K, + CurveNameP384, + CurveNameP521, + } +} + +// EncryptionAlgorithm - algorithm identifier +type EncryptionAlgorithm string + +const ( + EncryptionAlgorithmA128CBC EncryptionAlgorithm = "A128CBC" + EncryptionAlgorithmA128CBCPAD EncryptionAlgorithm = "A128CBCPAD" + EncryptionAlgorithmA128GCM EncryptionAlgorithm = "A128GCM" + EncryptionAlgorithmA128KW EncryptionAlgorithm = "A128KW" + EncryptionAlgorithmA192CBC EncryptionAlgorithm = "A192CBC" + EncryptionAlgorithmA192CBCPAD EncryptionAlgorithm = "A192CBCPAD" + EncryptionAlgorithmA192GCM EncryptionAlgorithm = "A192GCM" + EncryptionAlgorithmA192KW EncryptionAlgorithm = "A192KW" + EncryptionAlgorithmA256CBC EncryptionAlgorithm = "A256CBC" + EncryptionAlgorithmA256CBCPAD EncryptionAlgorithm = "A256CBCPAD" + EncryptionAlgorithmA256GCM EncryptionAlgorithm = "A256GCM" + EncryptionAlgorithmA256KW EncryptionAlgorithm = "A256KW" + EncryptionAlgorithmRSA15 EncryptionAlgorithm = "RSA1_5" + EncryptionAlgorithmRSAOAEP EncryptionAlgorithm = "RSA-OAEP" + EncryptionAlgorithmRSAOAEP256 EncryptionAlgorithm = "RSA-OAEP-256" +) + +// PossibleEncryptionAlgorithmValues returns the possible values for the EncryptionAlgorithm const type. +func PossibleEncryptionAlgorithmValues() []EncryptionAlgorithm { + return []EncryptionAlgorithm{ + EncryptionAlgorithmA128CBC, + EncryptionAlgorithmA128CBCPAD, + EncryptionAlgorithmA128GCM, + EncryptionAlgorithmA128KW, + EncryptionAlgorithmA192CBC, + EncryptionAlgorithmA192CBCPAD, + EncryptionAlgorithmA192GCM, + EncryptionAlgorithmA192KW, + EncryptionAlgorithmA256CBC, + EncryptionAlgorithmA256CBCPAD, + EncryptionAlgorithmA256GCM, + EncryptionAlgorithmA256KW, + EncryptionAlgorithmRSA15, + EncryptionAlgorithmRSAOAEP, + EncryptionAlgorithmRSAOAEP256, + } +} + +// KeyOperation - JSON web key operations. For more information, see JsonWebKeyOperation. +type KeyOperation string + +const ( + KeyOperationDecrypt KeyOperation = "decrypt" + KeyOperationEncrypt KeyOperation = "encrypt" + + KeyOperationImport KeyOperation = "import" + KeyOperationSign KeyOperation = "sign" + KeyOperationUnwrapKey KeyOperation = "unwrapKey" + KeyOperationVerify KeyOperation = "verify" + KeyOperationWrapKey KeyOperation = "wrapKey" +) + +// PossibleKeyOperationValues returns the possible values for the KeyOperation const type. +func PossibleKeyOperationValues() []KeyOperation { + return []KeyOperation{ + KeyOperationDecrypt, + KeyOperationEncrypt, + + KeyOperationImport, + KeyOperationSign, + KeyOperationUnwrapKey, + KeyOperationVerify, + KeyOperationWrapKey, + } +} + +// SignatureAlgorithm - The signing/verification algorithm identifier. For more information on possible algorithm +// types, see JsonWebKeySignatureAlgorithm. +type SignatureAlgorithm string + +const ( + // SignatureAlgorithmES256 - ECDSA using P-256 and SHA-256, as described in https://tools.ietf.org/html/rfc7518. 
+ SignatureAlgorithmES256 SignatureAlgorithm = "ES256" + // SignatureAlgorithmES256K - ECDSA using P-256K and SHA-256, as described in https://tools.ietf.org/html/rfc7518 + SignatureAlgorithmES256K SignatureAlgorithm = "ES256K" + // SignatureAlgorithmES384 - ECDSA using P-384 and SHA-384, as described in https://tools.ietf.org/html/rfc7518 + SignatureAlgorithmES384 SignatureAlgorithm = "ES384" + // SignatureAlgorithmES512 - ECDSA using P-521 and SHA-512, as described in https://tools.ietf.org/html/rfc7518 + SignatureAlgorithmES512 SignatureAlgorithm = "ES512" + // SignatureAlgorithmPS256 - RSASSA-PSS using SHA-256 and MGF1 with SHA-256, as described in https://tools.ietf.org/html/rfc7518 + SignatureAlgorithmPS256 SignatureAlgorithm = "PS256" + // SignatureAlgorithmPS384 - RSASSA-PSS using SHA-384 and MGF1 with SHA-384, as described in https://tools.ietf.org/html/rfc7518 + SignatureAlgorithmPS384 SignatureAlgorithm = "PS384" + // SignatureAlgorithmPS512 - RSASSA-PSS using SHA-512 and MGF1 with SHA-512, as described in https://tools.ietf.org/html/rfc7518 + SignatureAlgorithmPS512 SignatureAlgorithm = "PS512" + // SignatureAlgorithmRS256 - RSASSA-PKCS1-v1_5 using SHA-256, as described in https://tools.ietf.org/html/rfc7518 + SignatureAlgorithmRS256 SignatureAlgorithm = "RS256" + // SignatureAlgorithmRS384 - RSASSA-PKCS1-v1_5 using SHA-384, as described in https://tools.ietf.org/html/rfc7518 + SignatureAlgorithmRS384 SignatureAlgorithm = "RS384" + // SignatureAlgorithmRS512 - RSASSA-PKCS1-v1_5 using SHA-512, as described in https://tools.ietf.org/html/rfc7518 + SignatureAlgorithmRS512 SignatureAlgorithm = "RS512" +) + +// PossibleSignatureAlgorithmValues returns the possible values for the SignatureAlgorithm const type. +func PossibleSignatureAlgorithmValues() []SignatureAlgorithm { + return []SignatureAlgorithm{ + SignatureAlgorithmES256, + SignatureAlgorithmES256K, + SignatureAlgorithmES384, + SignatureAlgorithmES512, + SignatureAlgorithmPS256, + SignatureAlgorithmPS384, + SignatureAlgorithmPS512, + SignatureAlgorithmRS256, + SignatureAlgorithmRS384, + SignatureAlgorithmRS512, + } +} + +// KeyType - JsonWebKey Key Type (kty), as defined in https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40. +type KeyType string + +const ( + // KeyTypeEC - Elliptic Curve. + KeyTypeEC KeyType = "EC" + // KeyTypeECHSM - Elliptic Curve with a private key which is stored in the HSM. + KeyTypeECHSM KeyType = "EC-HSM" + // KeyTypeOct - Octet sequence (used to represent symmetric keys) + KeyTypeOct KeyType = "oct" + // KeyTypeOctHSM - Octet sequence (used to represent symmetric keys) which is stored the HSM. + KeyTypeOctHSM KeyType = "oct-HSM" + // KeyTypeRSA - RSA (https://tools.ietf.org/html/rfc3447) + KeyTypeRSA KeyType = "RSA" + // KeyTypeRSAHSM - RSA with a private key which is stored in the HSM. + KeyTypeRSAHSM KeyType = "RSA-HSM" +) + +// PossibleKeyTypeValues returns the possible values for the KeyType const type. 
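The typed constants above are used when building request models such as CreateKeyParameters (defined later in models.go). The sketch below assumes the Client.CreateKey method declared in this package's client.go outside this hunk (only its options type appears here), so its signature follows the generated pattern and should be checked against that file; client and name are placeholders, and the package clause is omitted.

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys"
)

// createRSAWrapKey requests a new 2048-bit RSA software key limited to wrap/unwrap,
// using the KeyType and KeyOperation constants defined above.
func createRSAWrapKey(ctx context.Context, client *azkeys.Client, name string) error {
	params := azkeys.CreateKeyParameters{
		Kty:     to.Ptr(azkeys.KeyTypeRSA),
		KeySize: to.Ptr(int32(2048)),
		KeyOps: []*azkeys.KeyOperation{
			to.Ptr(azkeys.KeyOperationWrapKey),
			to.Ptr(azkeys.KeyOperationUnwrapKey),
		},
	}
	_, err := client.CreateKey(ctx, name, params, nil) // signature assumed; verify against client.go
	return err
}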
+func PossibleKeyTypeValues() []KeyType { + return []KeyType{ + KeyTypeEC, + KeyTypeECHSM, + KeyTypeOct, + KeyTypeOctHSM, + KeyTypeRSA, + KeyTypeRSAHSM, + } +} + +// KeyEncryptionAlgorithm - The encryption algorithm to use to protected the exported key material +type KeyEncryptionAlgorithm string + +const ( + KeyEncryptionAlgorithmCKMRSAAESKEYWRAP KeyEncryptionAlgorithm = "CKM_RSA_AES_KEY_WRAP" + KeyEncryptionAlgorithmRSAAESKEYWRAP256 KeyEncryptionAlgorithm = "RSA_AES_KEY_WRAP_256" + KeyEncryptionAlgorithmRSAAESKEYWRAP384 KeyEncryptionAlgorithm = "RSA_AES_KEY_WRAP_384" +) + +// PossibleKeyEncryptionAlgorithmValues returns the possible values for the KeyEncryptionAlgorithm const type. +func PossibleKeyEncryptionAlgorithmValues() []KeyEncryptionAlgorithm { + return []KeyEncryptionAlgorithm{ + KeyEncryptionAlgorithmCKMRSAAESKEYWRAP, + KeyEncryptionAlgorithmRSAAESKEYWRAP256, + KeyEncryptionAlgorithmRSAAESKEYWRAP384, + } +} + +// KeyRotationPolicyAction - The type of the action. +type KeyRotationPolicyAction string + +const ( + // KeyRotationPolicyActionRotate - Rotate the key based on the key policy. + KeyRotationPolicyActionRotate KeyRotationPolicyAction = "rotate" + // KeyRotationPolicyActionNotify - Trigger event grid events. For preview, the notification time is not configurable and it + // is default to 30 days before expiry. + KeyRotationPolicyActionNotify KeyRotationPolicyAction = "notify" +) + +// PossibleKeyRotationPolicyActionValues returns the possible values for the KeyRotationPolicyAction const type. +func PossibleKeyRotationPolicyActionValues() []KeyRotationPolicyAction { + return []KeyRotationPolicyAction{ + KeyRotationPolicyActionRotate, + KeyRotationPolicyActionNotify, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/custom_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/custom_client.go new file mode 100644 index 00000000..edcd0d52 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/custom_client.go @@ -0,0 +1,63 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package azkeys + +// this file contains handwritten additions to the generated code + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal" +) + +// ClientOptions contains optional settings for Client. +type ClientOptions struct { + azcore.ClientOptions + + // DisableChallengeResourceVerification controls whether the policy requires the + // authentication challenge resource to match the Key Vault or Managed HSM domain. + // See https://aka.ms/azsdk/blog/vault-uri for more information. + DisableChallengeResourceVerification bool +} + +// NewClient creates a client that accesses a Key Vault's keys. You should validate that vaultURL +// references a valid Key Vault or Managed HSM. See https://aka.ms/azsdk/blog/vault-uri for details. 
+func NewClient(vaultURL string, credential azcore.TokenCredential, options *ClientOptions) (*Client, error) { + if options == nil { + options = &ClientOptions{} + } + authPolicy := internal.NewKeyVaultChallengePolicy( + credential, + &internal.KeyVaultChallengePolicyOptions{ + DisableChallengeResourceVerification: options.DisableChallengeResourceVerification, + }, + ) + azcoreClient, err := azcore.NewClient("azkeys.Client", version, runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}, &options.ClientOptions) + if err != nil { + return nil, err + } + return &Client{endpoint: vaultURL, internal: azcoreClient}, nil +} + +// ID is a key's unique ID, containing its version, if any, and name. +type ID string + +// Name of the key. +func (i *ID) Name() string { + _, name, _ := internal.ParseID((*string)(i)) + return *name +} + +// Version of the key. This returns an empty string when the ID contains no version. +func (i *ID) Version() string { + _, _, version := internal.ParseID((*string)(i)) + if version == nil { + return "" + } + return *version +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/models.go new file mode 100644 index 00000000..c81b7a75 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/models.go @@ -0,0 +1,548 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package azkeys + +import "time" + +// BackupKeyResult - The backup key result, containing the backup blob. +type BackupKeyResult struct { + // READ-ONLY; The backup blob containing the backed up key. + Value []byte `json:"value,omitempty" azure:"ro"` +} + +// BackupKeyOptions contains the optional parameters for the Client.BackupKey method. +type BackupKeyOptions struct { + // placeholder for future optional parameters +} + +// CreateKeyOptions contains the optional parameters for the Client.CreateKey method. +type CreateKeyOptions struct { + // placeholder for future optional parameters +} + +// DecryptOptions contains the optional parameters for the Client.Decrypt method. +type DecryptOptions struct { + // placeholder for future optional parameters +} + +// DeleteKeyOptions contains the optional parameters for the Client.DeleteKey method. +type DeleteKeyOptions struct { + // placeholder for future optional parameters +} + +// EncryptOptions contains the optional parameters for the Client.Encrypt method. +type EncryptOptions struct { + // placeholder for future optional parameters +} + +// GetDeletedKeyOptions contains the optional parameters for the Client.GetDeletedKey method. +type GetDeletedKeyOptions struct { + // placeholder for future optional parameters +} + +// GetKeyOptions contains the optional parameters for the Client.GetKey method. +type GetKeyOptions struct { + // placeholder for future optional parameters +} + +// GetKeyRotationPolicyOptions contains the optional parameters for the Client.GetKeyRotationPolicy method. +type GetKeyRotationPolicyOptions struct { + // placeholder for future optional parameters +} + +// GetRandomBytesOptions contains the optional parameters for the Client.GetRandomBytes method. 
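A minimal, self-contained sketch of constructing the client through NewClient above, using a DefaultAzureCredential from the azidentity module vendored in this same patch, together with the ID helpers defined above. The vault URL and key identifier are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	// nil options keep challenge-resource verification enabled (the default).
	client, err := azkeys.NewClient("https://example-vault.vault.azure.net", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	_ = client // ready for the key operations shown elsewhere in this package

	// ID splits a full key identifier into its name and optional version.
	kid := azkeys.ID("https://example-vault.vault.azure.net/keys/example-key/0123456789abcdef")
	fmt.Println(kid.Name(), kid.Version()) // example-key 0123456789abcdef
}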
+type GetRandomBytesOptions struct { + // placeholder for future optional parameters +} + +// ImportKeyOptions contains the optional parameters for the Client.ImportKey method. +type ImportKeyOptions struct { + // placeholder for future optional parameters +} + +// ListDeletedKeyPropertiesOptions contains the optional parameters for the Client.NewListDeletedKeyPropertiesPager +// method. +type ListDeletedKeyPropertiesOptions struct { + // placeholder for future optional parameters +} + +// ListKeyPropertiesOptions contains the optional parameters for the Client.NewListKeyPropertiesPager method. +type ListKeyPropertiesOptions struct { + // placeholder for future optional parameters +} + +// ListKeyPropertiesVersionsOptions contains the optional parameters for the Client.NewListKeyPropertiesVersionsPager +// method. +type ListKeyPropertiesVersionsOptions struct { + // placeholder for future optional parameters +} + +// PurgeDeletedKeyOptions contains the optional parameters for the Client.PurgeDeletedKey method. +type PurgeDeletedKeyOptions struct { + // placeholder for future optional parameters +} + +// RecoverDeletedKeyOptions contains the optional parameters for the Client.RecoverDeletedKey method. +type RecoverDeletedKeyOptions struct { + // placeholder for future optional parameters +} + +// ReleaseOptions contains the optional parameters for the Client.Release method. +type ReleaseOptions struct { + // placeholder for future optional parameters +} + +// RestoreKeyOptions contains the optional parameters for the Client.RestoreKey method. +type RestoreKeyOptions struct { + // placeholder for future optional parameters +} + +// RotateKeyOptions contains the optional parameters for the Client.RotateKey method. +type RotateKeyOptions struct { + // placeholder for future optional parameters +} + +// SignOptions contains the optional parameters for the Client.Sign method. +type SignOptions struct { + // placeholder for future optional parameters +} + +// UnwrapKeyOptions contains the optional parameters for the Client.UnwrapKey method. +type UnwrapKeyOptions struct { + // placeholder for future optional parameters +} + +// UpdateKeyOptions contains the optional parameters for the Client.UpdateKey method. +type UpdateKeyOptions struct { + // placeholder for future optional parameters +} + +// UpdateKeyRotationPolicyOptions contains the optional parameters for the Client.UpdateKeyRotationPolicy method. +type UpdateKeyRotationPolicyOptions struct { + // placeholder for future optional parameters +} + +// VerifyOptions contains the optional parameters for the Client.Verify method. +type VerifyOptions struct { + // placeholder for future optional parameters +} + +// WrapKeyOptions contains the optional parameters for the Client.WrapKey method. +type WrapKeyOptions struct { + // placeholder for future optional parameters +} + +// CreateKeyParameters - The key create parameters. +type CreateKeyParameters struct { + // REQUIRED; The type of key to create. For valid values, see JsonWebKeyType. + Kty *KeyType `json:"kty,omitempty"` + + // Elliptic curve name. For valid values, see JsonWebKeyCurveName. + Curve *CurveName `json:"crv,omitempty"` + + // The attributes of a key managed by the key vault service. + KeyAttributes *KeyAttributes `json:"attributes,omitempty"` + KeyOps []*KeyOperation `json:"key_ops,omitempty"` + + // The key size in bits. For example: 2048, 3072, or 4096 for RSA. + KeySize *int32 `json:"key_size,omitempty"` + + // The public exponent for a RSA key. 
+ PublicExponent *int32 `json:"public_exponent,omitempty"` + + // The policy rules under which the key can be exported. + ReleasePolicy *KeyReleasePolicy `json:"release_policy,omitempty"` + + // Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags,omitempty"` +} + +// DeletedKey - A DeletedKeyBundle consisting of a WebKey plus its Attributes and deletion info +type DeletedKey struct { + // The key management attributes. + Attributes *KeyAttributes `json:"attributes,omitempty"` + + // The Json web key. + Key *JSONWebKey `json:"key,omitempty"` + + // The url of the recovery object, used to identify and recover the deleted key. + RecoveryID *string `json:"recoveryId,omitempty"` + + // The policy rules under which the key can be exported. + ReleasePolicy *KeyReleasePolicy `json:"release_policy,omitempty"` + + // Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags,omitempty"` + + // READ-ONLY; The time when the key was deleted, in UTC + DeletedDate *time.Time `json:"deletedDate,omitempty" azure:"ro"` + + // READ-ONLY; True if the key's lifetime is managed by key vault. If this is a key backing a certificate, then managed will + // be true. + Managed *bool `json:"managed,omitempty" azure:"ro"` + + // READ-ONLY; The time when the key is scheduled to be purged, in UTC + ScheduledPurgeDate *time.Time `json:"scheduledPurgeDate,omitempty" azure:"ro"` +} + +// DeletedKeyProperties - The deleted key item containing the deleted key metadata and information about deletion. +type DeletedKeyProperties struct { + // The key management attributes. + Attributes *KeyAttributes `json:"attributes,omitempty"` + + // Key identifier. + KID *ID `json:"kid,omitempty"` + + // The url of the recovery object, used to identify and recover the deleted key. + RecoveryID *string `json:"recoveryId,omitempty"` + + // Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags,omitempty"` + + // READ-ONLY; The time when the key was deleted, in UTC + DeletedDate *time.Time `json:"deletedDate,omitempty" azure:"ro"` + + // READ-ONLY; True if the key's lifetime is managed by key vault. If this is a key backing a certificate, then managed will + // be true. + Managed *bool `json:"managed,omitempty" azure:"ro"` + + // READ-ONLY; The time when the key is scheduled to be purged, in UTC + ScheduledPurgeDate *time.Time `json:"scheduledPurgeDate,omitempty" azure:"ro"` +} + +// DeletedKeyPropertiesListResult - A list of keys that have been deleted in this vault. +type DeletedKeyPropertiesListResult struct { + // READ-ONLY; The URL to get the next set of deleted keys. + NextLink *string `json:"nextLink,omitempty" azure:"ro"` + + // READ-ONLY; A response message containing a list of deleted keys in the vault along with a link to the next page of deleted + // keys + Value []*DeletedKeyProperties `json:"value,omitempty" azure:"ro"` +} + +// GetRandomBytesParameters - The get random bytes request object. +type GetRandomBytesParameters struct { + // REQUIRED; The requested number of random bytes. + Count *int32 `json:"count,omitempty"` +} + +// ImportKeyParameters - The key import parameters. +type ImportKeyParameters struct { + // REQUIRED; The Json web key + Key *JSONWebKey `json:"key,omitempty"` + + // Whether to import as a hardware key (HSM) or software key. + HSM *bool `json:"Hsm,omitempty"` + + // The key management attributes. 
+ KeyAttributes *KeyAttributes `json:"attributes,omitempty"` + + // The policy rules under which the key can be exported. + ReleasePolicy *KeyReleasePolicy `json:"release_policy,omitempty"` + + // Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags,omitempty"` +} + +// JSONWebKey - As of http://tools.ietf.org/html/draft-ietf-jose-json-web-key-18 +type JSONWebKey struct { + // Elliptic curve name. For valid values, see JsonWebKeyCurveName. + Crv *CurveName `json:"crv,omitempty"` + + // RSA private exponent, or the D component of an EC private key. + D []byte `json:"d,omitempty"` + + // RSA private key parameter. + DP []byte `json:"dp,omitempty"` + + // RSA private key parameter. + DQ []byte `json:"dq,omitempty"` + + // RSA public exponent. + E []byte `json:"e,omitempty"` + + // Symmetric key. + K []byte `json:"k,omitempty"` + + // Key identifier. + KID *ID `json:"kid,omitempty"` + KeyOps []*KeyOperation `json:"key_ops,omitempty"` + + // JsonWebKey Key Type (kty), as defined in https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40. + Kty *KeyType `json:"kty,omitempty"` + + // RSA modulus. + N []byte `json:"n,omitempty"` + + // RSA secret prime. + P []byte `json:"p,omitempty"` + + // RSA secret prime, with p < q. + Q []byte `json:"q,omitempty"` + + // RSA private key parameter. + QI []byte `json:"qi,omitempty"` + + // Protected Key, used with 'Bring Your Own Key'. + T []byte `json:"key_hsm,omitempty"` + + // X component of an EC public key. + X []byte `json:"x,omitempty"` + + // Y component of an EC public key. + Y []byte `json:"y,omitempty"` +} + +// KeyAttributes - The attributes of a key managed by the key vault service. +type KeyAttributes struct { + // Determines whether the object is enabled. + Enabled *bool `json:"enabled,omitempty"` + + // Expiry date in UTC. + Expires *time.Time `json:"exp,omitempty"` + + // Indicates if the private key can be exported. Release policy must be provided when creating the first version of an exportable + // key. + Exportable *bool `json:"exportable,omitempty"` + + // Not before date in UTC. + NotBefore *time.Time `json:"nbf,omitempty"` + + // READ-ONLY; Creation time in UTC. + Created *time.Time `json:"created,omitempty" azure:"ro"` + + // READ-ONLY; softDelete data retention days. Value should be >=7 and <=90 when softDelete enabled, otherwise 0. + RecoverableDays *int32 `json:"recoverableDays,omitempty" azure:"ro"` + + // READ-ONLY; Reflects the deletion recovery level currently in effect for keys in the current vault. If it contains 'Purgeable' + // the key can be permanently deleted by a privileged user; otherwise, only the system + // can purge the key, at the end of the retention interval. + RecoveryLevel *string `json:"recoveryLevel,omitempty" azure:"ro"` + + // READ-ONLY; Last updated time in UTC. + Updated *time.Time `json:"updated,omitempty" azure:"ro"` +} + +// KeyBundle - A KeyBundle consisting of a WebKey plus its attributes. +type KeyBundle struct { + // The key management attributes. + Attributes *KeyAttributes `json:"attributes,omitempty"` + + // The Json web key. + Key *JSONWebKey `json:"key,omitempty"` + + // The policy rules under which the key can be exported. + ReleasePolicy *KeyReleasePolicy `json:"release_policy,omitempty"` + + // Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags,omitempty"` + + // READ-ONLY; True if the key's lifetime is managed by key vault. 
If this is a key backing a certificate, then managed will + // be true. + Managed *bool `json:"managed,omitempty" azure:"ro"` +} + +// KeyOperationParameters - The key operations parameters. +type KeyOperationParameters struct { + // REQUIRED; algorithm identifier + Algorithm *EncryptionAlgorithm `json:"alg,omitempty"` + + // REQUIRED + Value []byte `json:"value,omitempty"` + + // Additional data to authenticate but not encrypt/decrypt when using authenticated crypto algorithms. + AdditionalAuthenticatedData []byte `json:"aad,omitempty"` + + // The tag to authenticate when performing decryption with an authenticated algorithm. + AuthenticationTag []byte `json:"tag,omitempty"` + + // Cryptographically random, non-repeating initialization vector for symmetric algorithms. + IV []byte `json:"iv,omitempty"` +} + +// KeyOperationResult - The key operation result. +type KeyOperationResult struct { + // READ-ONLY + AdditionalAuthenticatedData []byte `json:"aad,omitempty" azure:"ro"` + + // READ-ONLY + AuthenticationTag []byte `json:"tag,omitempty" azure:"ro"` + + // READ-ONLY + IV []byte `json:"iv,omitempty" azure:"ro"` + + // READ-ONLY; Key identifier + KID *ID `json:"kid,omitempty" azure:"ro"` + + // READ-ONLY + Result []byte `json:"value,omitempty" azure:"ro"` +} + +// KeyProperties - The key item containing key metadata. +type KeyProperties struct { + // The key management attributes. + Attributes *KeyAttributes `json:"attributes,omitempty"` + + // Key identifier. + KID *ID `json:"kid,omitempty"` + + // Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags,omitempty"` + + // READ-ONLY; True if the key's lifetime is managed by key vault. If this is a key backing a certificate, then managed will + // be true. + Managed *bool `json:"managed,omitempty" azure:"ro"` +} + +// KeyPropertiesListResult - The key list result. +type KeyPropertiesListResult struct { + // READ-ONLY; The URL to get the next set of keys. + NextLink *string `json:"nextLink,omitempty" azure:"ro"` + + // READ-ONLY; A response message containing a list of keys in the key vault along with a link to the next page of keys. + Value []*KeyProperties `json:"value,omitempty" azure:"ro"` +} + +// KeyReleasePolicy - The policy rules under which the key can be exported. +type KeyReleasePolicy struct { + // Content type and version of key release policy + ContentType *string `json:"contentType,omitempty"` + + // Blob encoding the policy rules under which the key can be released. Blob must be base64 URL encoded. + EncodedPolicy []byte `json:"data,omitempty"` + + // Defines the mutability state of the policy. Once marked immutable, this flag cannot be reset and the policy cannot be changed + // under any circumstances. + Immutable *bool `json:"immutable,omitempty"` +} + +// KeyReleaseResult - The release result, containing the released key. +type KeyReleaseResult struct { + // READ-ONLY; A signed object containing the released key. + Value *string `json:"value,omitempty" azure:"ro"` +} + +// KeyRotationPolicy - Management policy for a key. +type KeyRotationPolicy struct { + // The key rotation policy attributes. + Attributes *KeyRotationPolicyAttributes `json:"attributes,omitempty"` + + // Actions that will be performed by Key Vault over the lifetime of a key. For preview, lifetimeActions can only have two + // items at maximum: one for rotate, one for notify. Notification time would be + // default to 30 days before expiry and it is not configurable. 
+ LifetimeActions []*LifetimeAction `json:"lifetimeActions,omitempty"` + + // READ-ONLY; The key policy id. + ID *string `json:"id,omitempty" azure:"ro"` +} + +// KeyRotationPolicyAttributes - The key rotation policy attributes. +type KeyRotationPolicyAttributes struct { + // The expiryTime will be applied on the new key version. It should be at least 28 days. It will be in ISO 8601 Format. Examples: + // 90 days: P90D, 3 months: P3M, 48 hours: PT48H, 1 year and 10 days: P1Y10D + ExpiryTime *string `json:"expiryTime,omitempty"` + + // READ-ONLY; The key rotation policy created time in UTC. + Created *time.Time `json:"created,omitempty" azure:"ro"` + + // READ-ONLY; The key rotation policy's last updated time in UTC. + Updated *time.Time `json:"updated,omitempty" azure:"ro"` +} + +// KeyVerifyResult - The key verify result. +type KeyVerifyResult struct { + // READ-ONLY; True if the signature is verified, otherwise false. + Value *bool `json:"value,omitempty" azure:"ro"` +} + +// LifetimeAction - Action and its trigger that will be performed by Key Vault over the lifetime of a key. +type LifetimeAction struct { + // The action that will be executed. + Action *LifetimeActionType `json:"action,omitempty"` + + // The condition that will execute the action. + Trigger *LifetimeActionTrigger `json:"trigger,omitempty"` +} + +// LifetimeActionTrigger - A condition to be satisfied for an action to be executed. +type LifetimeActionTrigger struct { + // Time after creation to attempt to rotate. It only applies to rotate. It will be in ISO 8601 duration format. Example: 90 + // days : "P90D" + TimeAfterCreate *string `json:"timeAfterCreate,omitempty"` + + // Time before expiry to attempt to rotate or notify. It will be in ISO 8601 duration format. Example: 90 days : "P90D" + TimeBeforeExpiry *string `json:"timeBeforeExpiry,omitempty"` +} + +// LifetimeActionType - The action that will be executed. +type LifetimeActionType struct { + // The type of the action. + Type *KeyRotationPolicyAction `json:"type,omitempty"` +} + +// RandomBytes - The get random bytes response object containing the bytes. +type RandomBytes struct { + // REQUIRED; The bytes encoded as a base64url string. + Value []byte `json:"value,omitempty"` +} + +// ReleaseParameters - The release key parameters. +type ReleaseParameters struct { + // REQUIRED; The attestation assertion for the target of the key release. + TargetAttestationToken *string `json:"target,omitempty"` + + // The encryption algorithm to use to protected the exported key material + Algorithm *KeyEncryptionAlgorithm `json:"enc,omitempty"` + + // A client provided nonce for freshness. + Nonce *string `json:"nonce,omitempty"` +} + +// RestoreKeyParameters - The key restore parameters. +type RestoreKeyParameters struct { + // REQUIRED; The backup blob associated with a key bundle. + KeyBackup []byte `json:"value,omitempty"` +} + +// SignParameters - The key operations parameters. +type SignParameters struct { + // REQUIRED; The signing/verification algorithm identifier. For more information on possible algorithm types, see JsonWebKeySignatureAlgorithm. + Algorithm *SignatureAlgorithm `json:"alg,omitempty"` + + // REQUIRED + Value []byte `json:"value,omitempty"` +} + +// UpdateKeyParameters - The key update parameters. +type UpdateKeyParameters struct { + // The attributes of a key managed by the key vault service. + KeyAttributes *KeyAttributes `json:"attributes,omitempty"` + + // Json web key operations. 
For more information on possible key operations, see JsonWebKeyOperation. + KeyOps []*KeyOperation `json:"key_ops,omitempty"` + + // The policy rules under which the key can be exported. + ReleasePolicy *KeyReleasePolicy `json:"release_policy,omitempty"` + + // Application specific metadata in the form of key-value pairs. + Tags map[string]*string `json:"tags,omitempty"` +} + +// VerifyParameters - The key verify parameters. +type VerifyParameters struct { + // REQUIRED; The signing/verification algorithm. For more information on possible algorithm types, see JsonWebKeySignatureAlgorithm. + Algorithm *SignatureAlgorithm `json:"alg,omitempty"` + + // REQUIRED; The digest used for signing. + Digest []byte `json:"digest,omitempty"` + + // REQUIRED; The signature to be verified. + Signature []byte `json:"value,omitempty"` +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/models_serde.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/models_serde.go new file mode 100644 index 00000000..2bcbe556 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/models_serde.go @@ -0,0 +1,1120 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package azkeys + +import ( + "encoding/json" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "reflect" +) + +// MarshalJSON implements the json.Marshaller interface for type BackupKeyResult. +func (b BackupKeyResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populateByteArray(objectMap, "value", b.Value, runtime.Base64URLFormat) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type BackupKeyResult. +func (b *BackupKeyResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", b, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "value": + err = runtime.DecodeByteArray(string(val), &b.Value, runtime.Base64URLFormat) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", b, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type CreateKeyParameters. +func (c CreateKeyParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "crv", c.Curve) + populate(objectMap, "attributes", c.KeyAttributes) + populate(objectMap, "key_ops", c.KeyOps) + populate(objectMap, "key_size", c.KeySize) + populate(objectMap, "kty", c.Kty) + populate(objectMap, "public_exponent", c.PublicExponent) + populate(objectMap, "release_policy", c.ReleasePolicy) + populate(objectMap, "tags", c.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type CreateKeyParameters. 
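A sketch of the Verify method shown earlier in client.go, using the VerifyParameters model defined just above: the SHA-256 digest is computed locally and the service checks the RS256 signature with the key's public portion. The client, key name/version, and signature bytes are placeholders (the signature is assumed to have come from the matching Sign operation), and the package clause is omitted.

import (
	"context"
	"crypto/sha256"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys"
)

// verifyRS256 reports whether signature is a valid RS256 signature over message,
// as judged by the Key Vault key identified by name/version.
func verifyRS256(ctx context.Context, client *azkeys.Client, name, version string, message, signature []byte) (bool, error) {
	digest := sha256.Sum256(message)
	params := azkeys.VerifyParameters{
		Algorithm: to.Ptr(azkeys.SignatureAlgorithmRS256),
		Digest:    digest[:],
		Signature: signature,
	}
	resp, err := client.Verify(ctx, name, version, params, nil)
	if err != nil {
		return false, err
	}
	return resp.KeyVerifyResult.Value != nil && *resp.KeyVerifyResult.Value, nil
}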
+func (c *CreateKeyParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "crv": + err = unpopulate(val, "Curve", &c.Curve) + delete(rawMsg, key) + case "attributes": + err = unpopulate(val, "KeyAttributes", &c.KeyAttributes) + delete(rawMsg, key) + case "key_ops": + err = unpopulate(val, "KeyOps", &c.KeyOps) + delete(rawMsg, key) + case "key_size": + err = unpopulate(val, "KeySize", &c.KeySize) + delete(rawMsg, key) + case "kty": + err = unpopulate(val, "Kty", &c.Kty) + delete(rawMsg, key) + case "public_exponent": + err = unpopulate(val, "PublicExponent", &c.PublicExponent) + delete(rawMsg, key) + case "release_policy": + err = unpopulate(val, "ReleasePolicy", &c.ReleasePolicy) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &c.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type DeletedKey. +func (d DeletedKey) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "attributes", d.Attributes) + populateTimeUnix(objectMap, "deletedDate", d.DeletedDate) + populate(objectMap, "key", d.Key) + populate(objectMap, "managed", d.Managed) + populate(objectMap, "recoveryId", d.RecoveryID) + populate(objectMap, "release_policy", d.ReleasePolicy) + populateTimeUnix(objectMap, "scheduledPurgeDate", d.ScheduledPurgeDate) + populate(objectMap, "tags", d.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type DeletedKey. +func (d *DeletedKey) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "attributes": + err = unpopulate(val, "Attributes", &d.Attributes) + delete(rawMsg, key) + case "deletedDate": + err = unpopulateTimeUnix(val, "DeletedDate", &d.DeletedDate) + delete(rawMsg, key) + case "key": + err = unpopulate(val, "Key", &d.Key) + delete(rawMsg, key) + case "managed": + err = unpopulate(val, "Managed", &d.Managed) + delete(rawMsg, key) + case "recoveryId": + err = unpopulate(val, "RecoveryID", &d.RecoveryID) + delete(rawMsg, key) + case "release_policy": + err = unpopulate(val, "ReleasePolicy", &d.ReleasePolicy) + delete(rawMsg, key) + case "scheduledPurgeDate": + err = unpopulateTimeUnix(val, "ScheduledPurgeDate", &d.ScheduledPurgeDate) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &d.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type DeletedKeyProperties. 
+func (d DeletedKeyProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "attributes", d.Attributes) + populateTimeUnix(objectMap, "deletedDate", d.DeletedDate) + populate(objectMap, "kid", d.KID) + populate(objectMap, "managed", d.Managed) + populate(objectMap, "recoveryId", d.RecoveryID) + populateTimeUnix(objectMap, "scheduledPurgeDate", d.ScheduledPurgeDate) + populate(objectMap, "tags", d.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type DeletedKeyProperties. +func (d *DeletedKeyProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "attributes": + err = unpopulate(val, "Attributes", &d.Attributes) + delete(rawMsg, key) + case "deletedDate": + err = unpopulateTimeUnix(val, "DeletedDate", &d.DeletedDate) + delete(rawMsg, key) + case "kid": + err = unpopulate(val, "KID", &d.KID) + delete(rawMsg, key) + case "managed": + err = unpopulate(val, "Managed", &d.Managed) + delete(rawMsg, key) + case "recoveryId": + err = unpopulate(val, "RecoveryID", &d.RecoveryID) + delete(rawMsg, key) + case "scheduledPurgeDate": + err = unpopulateTimeUnix(val, "ScheduledPurgeDate", &d.ScheduledPurgeDate) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &d.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type DeletedKeyPropertiesListResult. +func (d DeletedKeyPropertiesListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", d.NextLink) + populate(objectMap, "value", d.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type DeletedKeyPropertiesListResult. +func (d *DeletedKeyPropertiesListResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &d.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &d.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type GetRandomBytesParameters. +func (g GetRandomBytesParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "count", g.Count) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type GetRandomBytesParameters. 
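The DeletedKeyProperties serialization above backs the deleted-key listing. ListDeletedKeyPropertiesOptions in models.go names a Client.NewListDeletedKeyPropertiesPager method that lives in this package's client.go outside this hunk, so the sketch below assumes the usual azcore pager shape (a runtime.Pager whose pages carry DeletedKeyPropertiesListResult) and should be checked against that file; the package clause is omitted.

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys"
)

// printDeletedKeys walks every page of deleted keys in the vault. The loop itself is
// the standard azcore pager pattern (More, then NextPage until exhausted).
func printDeletedKeys(ctx context.Context, client *azkeys.Client) error {
	pager := client.NewListDeletedKeyPropertiesPager(nil)
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, dk := range page.Value { // []*DeletedKeyProperties, per the list result above
			if dk.KID != nil {
				fmt.Println(dk.KID.Name(), dk.DeletedDate)
			}
		}
	}
	return nil
}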
+func (g *GetRandomBytesParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", g, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "count": + err = unpopulate(val, "Count", &g.Count) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", g, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ImportKeyParameters. +func (i ImportKeyParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "Hsm", i.HSM) + populate(objectMap, "key", i.Key) + populate(objectMap, "attributes", i.KeyAttributes) + populate(objectMap, "release_policy", i.ReleasePolicy) + populate(objectMap, "tags", i.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ImportKeyParameters. +func (i *ImportKeyParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "Hsm": + err = unpopulate(val, "HSM", &i.HSM) + delete(rawMsg, key) + case "key": + err = unpopulate(val, "Key", &i.Key) + delete(rawMsg, key) + case "attributes": + err = unpopulate(val, "KeyAttributes", &i.KeyAttributes) + delete(rawMsg, key) + case "release_policy": + err = unpopulate(val, "ReleasePolicy", &i.ReleasePolicy) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &i.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type JSONWebKey. +func (j JSONWebKey) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "crv", j.Crv) + populateByteArray(objectMap, "d", j.D, runtime.Base64URLFormat) + populateByteArray(objectMap, "dp", j.DP, runtime.Base64URLFormat) + populateByteArray(objectMap, "dq", j.DQ, runtime.Base64URLFormat) + populateByteArray(objectMap, "e", j.E, runtime.Base64URLFormat) + populateByteArray(objectMap, "k", j.K, runtime.Base64URLFormat) + populate(objectMap, "kid", j.KID) + populate(objectMap, "key_ops", j.KeyOps) + populate(objectMap, "kty", j.Kty) + populateByteArray(objectMap, "n", j.N, runtime.Base64URLFormat) + populateByteArray(objectMap, "p", j.P, runtime.Base64URLFormat) + populateByteArray(objectMap, "q", j.Q, runtime.Base64URLFormat) + populateByteArray(objectMap, "qi", j.QI, runtime.Base64URLFormat) + populateByteArray(objectMap, "key_hsm", j.T, runtime.Base64URLFormat) + populateByteArray(objectMap, "x", j.X, runtime.Base64URLFormat) + populateByteArray(objectMap, "y", j.Y, runtime.Base64URLFormat) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type JSONWebKey. 
+func (j *JSONWebKey) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", j, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "crv": + err = unpopulate(val, "Crv", &j.Crv) + delete(rawMsg, key) + case "d": + err = runtime.DecodeByteArray(string(val), &j.D, runtime.Base64URLFormat) + delete(rawMsg, key) + case "dp": + err = runtime.DecodeByteArray(string(val), &j.DP, runtime.Base64URLFormat) + delete(rawMsg, key) + case "dq": + err = runtime.DecodeByteArray(string(val), &j.DQ, runtime.Base64URLFormat) + delete(rawMsg, key) + case "e": + err = runtime.DecodeByteArray(string(val), &j.E, runtime.Base64URLFormat) + delete(rawMsg, key) + case "k": + err = runtime.DecodeByteArray(string(val), &j.K, runtime.Base64URLFormat) + delete(rawMsg, key) + case "kid": + err = unpopulate(val, "KID", &j.KID) + delete(rawMsg, key) + case "key_ops": + err = unpopulate(val, "KeyOps", &j.KeyOps) + delete(rawMsg, key) + case "kty": + err = unpopulate(val, "Kty", &j.Kty) + delete(rawMsg, key) + case "n": + err = runtime.DecodeByteArray(string(val), &j.N, runtime.Base64URLFormat) + delete(rawMsg, key) + case "p": + err = runtime.DecodeByteArray(string(val), &j.P, runtime.Base64URLFormat) + delete(rawMsg, key) + case "q": + err = runtime.DecodeByteArray(string(val), &j.Q, runtime.Base64URLFormat) + delete(rawMsg, key) + case "qi": + err = runtime.DecodeByteArray(string(val), &j.QI, runtime.Base64URLFormat) + delete(rawMsg, key) + case "key_hsm": + err = runtime.DecodeByteArray(string(val), &j.T, runtime.Base64URLFormat) + delete(rawMsg, key) + case "x": + err = runtime.DecodeByteArray(string(val), &j.X, runtime.Base64URLFormat) + delete(rawMsg, key) + case "y": + err = runtime.DecodeByteArray(string(val), &j.Y, runtime.Base64URLFormat) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", j, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KeyAttributes. +func (k KeyAttributes) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populateTimeUnix(objectMap, "created", k.Created) + populate(objectMap, "enabled", k.Enabled) + populateTimeUnix(objectMap, "exp", k.Expires) + populate(objectMap, "exportable", k.Exportable) + populateTimeUnix(objectMap, "nbf", k.NotBefore) + populate(objectMap, "recoverableDays", k.RecoverableDays) + populate(objectMap, "recoveryLevel", k.RecoveryLevel) + populateTimeUnix(objectMap, "updated", k.Updated) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KeyAttributes. 
+func (k *KeyAttributes) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "created": + err = unpopulateTimeUnix(val, "Created", &k.Created) + delete(rawMsg, key) + case "enabled": + err = unpopulate(val, "Enabled", &k.Enabled) + delete(rawMsg, key) + case "exp": + err = unpopulateTimeUnix(val, "Expires", &k.Expires) + delete(rawMsg, key) + case "exportable": + err = unpopulate(val, "Exportable", &k.Exportable) + delete(rawMsg, key) + case "nbf": + err = unpopulateTimeUnix(val, "NotBefore", &k.NotBefore) + delete(rawMsg, key) + case "recoverableDays": + err = unpopulate(val, "RecoverableDays", &k.RecoverableDays) + delete(rawMsg, key) + case "recoveryLevel": + err = unpopulate(val, "RecoveryLevel", &k.RecoveryLevel) + delete(rawMsg, key) + case "updated": + err = unpopulateTimeUnix(val, "Updated", &k.Updated) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KeyBundle. +func (k KeyBundle) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "attributes", k.Attributes) + populate(objectMap, "key", k.Key) + populate(objectMap, "managed", k.Managed) + populate(objectMap, "release_policy", k.ReleasePolicy) + populate(objectMap, "tags", k.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KeyBundle. +func (k *KeyBundle) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "attributes": + err = unpopulate(val, "Attributes", &k.Attributes) + delete(rawMsg, key) + case "key": + err = unpopulate(val, "Key", &k.Key) + delete(rawMsg, key) + case "managed": + err = unpopulate(val, "Managed", &k.Managed) + delete(rawMsg, key) + case "release_policy": + err = unpopulate(val, "ReleasePolicy", &k.ReleasePolicy) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &k.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KeyOperationParameters. +func (k KeyOperationParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populateByteArray(objectMap, "aad", k.AdditionalAuthenticatedData, runtime.Base64URLFormat) + populate(objectMap, "alg", k.Algorithm) + populateByteArray(objectMap, "tag", k.AuthenticationTag, runtime.Base64URLFormat) + populateByteArray(objectMap, "iv", k.IV, runtime.Base64URLFormat) + populateByteArray(objectMap, "value", k.Value, runtime.Base64URLFormat) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KeyOperationParameters. 
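To illustrate the wire format these marshallers implement: []byte fields travel as unpadded base64url strings and the attribute timestamps as Unix seconds, so a plain json.Unmarshal of a service-style payload yields decoded bytes and time.Time values. The payload below is illustrative only, and the package clause is omitted.

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys"
)

// decodeSampleBundle relies on the custom UnmarshalJSON implementations above to decode
// the base64url RSA public exponent "e" and convert the Unix-second "created" attribute.
func decodeSampleBundle() error {
	payload := []byte(`{
		"key": {"kty": "RSA", "e": "AQAB"},
		"attributes": {"enabled": true, "created": 1672531200}
	}`)
	var kb azkeys.KeyBundle
	if err := json.Unmarshal(payload, &kb); err != nil {
		return err
	}
	fmt.Printf("kty=%s e=%x created=%s\n", *kb.Key.Kty, kb.Key.E, kb.Attributes.Created.UTC())
	// Expected: kty=RSA e=10001 created=2023-01-01 00:00:00 +0000 UTC
	return nil
}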
+func (k *KeyOperationParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "aad": + err = runtime.DecodeByteArray(string(val), &k.AdditionalAuthenticatedData, runtime.Base64URLFormat) + delete(rawMsg, key) + case "alg": + err = unpopulate(val, "Algorithm", &k.Algorithm) + delete(rawMsg, key) + case "tag": + err = runtime.DecodeByteArray(string(val), &k.AuthenticationTag, runtime.Base64URLFormat) + delete(rawMsg, key) + case "iv": + err = runtime.DecodeByteArray(string(val), &k.IV, runtime.Base64URLFormat) + delete(rawMsg, key) + case "value": + err = runtime.DecodeByteArray(string(val), &k.Value, runtime.Base64URLFormat) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KeyOperationResult. +func (k KeyOperationResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populateByteArray(objectMap, "aad", k.AdditionalAuthenticatedData, runtime.Base64URLFormat) + populateByteArray(objectMap, "tag", k.AuthenticationTag, runtime.Base64URLFormat) + populateByteArray(objectMap, "iv", k.IV, runtime.Base64URLFormat) + populate(objectMap, "kid", k.KID) + populateByteArray(objectMap, "value", k.Result, runtime.Base64URLFormat) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KeyOperationResult. +func (k *KeyOperationResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "aad": + err = runtime.DecodeByteArray(string(val), &k.AdditionalAuthenticatedData, runtime.Base64URLFormat) + delete(rawMsg, key) + case "tag": + err = runtime.DecodeByteArray(string(val), &k.AuthenticationTag, runtime.Base64URLFormat) + delete(rawMsg, key) + case "iv": + err = runtime.DecodeByteArray(string(val), &k.IV, runtime.Base64URLFormat) + delete(rawMsg, key) + case "kid": + err = unpopulate(val, "KID", &k.KID) + delete(rawMsg, key) + case "value": + err = runtime.DecodeByteArray(string(val), &k.Result, runtime.Base64URLFormat) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KeyProperties. +func (k KeyProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "attributes", k.Attributes) + populate(objectMap, "kid", k.KID) + populate(objectMap, "managed", k.Managed) + populate(objectMap, "tags", k.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KeyProperties. 
+func (k *KeyProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "attributes": + err = unpopulate(val, "Attributes", &k.Attributes) + delete(rawMsg, key) + case "kid": + err = unpopulate(val, "KID", &k.KID) + delete(rawMsg, key) + case "managed": + err = unpopulate(val, "Managed", &k.Managed) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &k.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KeyPropertiesListResult. +func (k KeyPropertiesListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", k.NextLink) + populate(objectMap, "value", k.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KeyPropertiesListResult. +func (k *KeyPropertiesListResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &k.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &k.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KeyReleasePolicy. +func (k KeyReleasePolicy) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "contentType", k.ContentType) + populateByteArray(objectMap, "data", k.EncodedPolicy, runtime.Base64URLFormat) + populate(objectMap, "immutable", k.Immutable) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KeyReleasePolicy. +func (k *KeyReleasePolicy) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "contentType": + err = unpopulate(val, "ContentType", &k.ContentType) + delete(rawMsg, key) + case "data": + err = runtime.DecodeByteArray(string(val), &k.EncodedPolicy, runtime.Base64URLFormat) + delete(rawMsg, key) + case "immutable": + err = unpopulate(val, "Immutable", &k.Immutable) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KeyReleaseResult. +func (k KeyReleaseResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "value", k.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KeyReleaseResult. 
+func (k *KeyReleaseResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "value": + err = unpopulate(val, "Value", &k.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KeyRotationPolicy. +func (k KeyRotationPolicy) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "attributes", k.Attributes) + populate(objectMap, "id", k.ID) + populate(objectMap, "lifetimeActions", k.LifetimeActions) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KeyRotationPolicy. +func (k *KeyRotationPolicy) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "attributes": + err = unpopulate(val, "Attributes", &k.Attributes) + delete(rawMsg, key) + case "id": + err = unpopulate(val, "ID", &k.ID) + delete(rawMsg, key) + case "lifetimeActions": + err = unpopulate(val, "LifetimeActions", &k.LifetimeActions) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KeyRotationPolicyAttributes. +func (k KeyRotationPolicyAttributes) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populateTimeUnix(objectMap, "created", k.Created) + populate(objectMap, "expiryTime", k.ExpiryTime) + populateTimeUnix(objectMap, "updated", k.Updated) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KeyRotationPolicyAttributes. +func (k *KeyRotationPolicyAttributes) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "created": + err = unpopulateTimeUnix(val, "Created", &k.Created) + delete(rawMsg, key) + case "expiryTime": + err = unpopulate(val, "ExpiryTime", &k.ExpiryTime) + delete(rawMsg, key) + case "updated": + err = unpopulateTimeUnix(val, "Updated", &k.Updated) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KeyVerifyResult. +func (k KeyVerifyResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "value", k.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KeyVerifyResult. 
+func (k *KeyVerifyResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "value": + err = unpopulate(val, "Value", &k.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type LifetimeAction. +func (l LifetimeAction) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "action", l.Action) + populate(objectMap, "trigger", l.Trigger) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type LifetimeAction. +func (l *LifetimeAction) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "action": + err = unpopulate(val, "Action", &l.Action) + delete(rawMsg, key) + case "trigger": + err = unpopulate(val, "Trigger", &l.Trigger) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type LifetimeActionTrigger. +func (l LifetimeActionTrigger) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "timeAfterCreate", l.TimeAfterCreate) + populate(objectMap, "timeBeforeExpiry", l.TimeBeforeExpiry) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type LifetimeActionTrigger. +func (l *LifetimeActionTrigger) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "timeAfterCreate": + err = unpopulate(val, "TimeAfterCreate", &l.TimeAfterCreate) + delete(rawMsg, key) + case "timeBeforeExpiry": + err = unpopulate(val, "TimeBeforeExpiry", &l.TimeBeforeExpiry) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type LifetimeActionType. +func (l LifetimeActionType) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "type", l.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type LifetimeActionType. +func (l *LifetimeActionType) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "type": + err = unpopulate(val, "Type", &l.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type RandomBytes. 
+func (r RandomBytes) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populateByteArray(objectMap, "value", r.Value, runtime.Base64URLFormat) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type RandomBytes. +func (r *RandomBytes) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "value": + err = runtime.DecodeByteArray(string(val), &r.Value, runtime.Base64URLFormat) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ReleaseParameters. +func (r ReleaseParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "enc", r.Algorithm) + populate(objectMap, "nonce", r.Nonce) + populate(objectMap, "target", r.TargetAttestationToken) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ReleaseParameters. +func (r *ReleaseParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "enc": + err = unpopulate(val, "Algorithm", &r.Algorithm) + delete(rawMsg, key) + case "nonce": + err = unpopulate(val, "Nonce", &r.Nonce) + delete(rawMsg, key) + case "target": + err = unpopulate(val, "TargetAttestationToken", &r.TargetAttestationToken) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type RestoreKeyParameters. +func (r RestoreKeyParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populateByteArray(objectMap, "value", r.KeyBackup, runtime.Base64URLFormat) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type RestoreKeyParameters. +func (r *RestoreKeyParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "value": + err = runtime.DecodeByteArray(string(val), &r.KeyBackup, runtime.Base64URLFormat) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type SignParameters. +func (s SignParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "alg", s.Algorithm) + populateByteArray(objectMap, "value", s.Value, runtime.Base64URLFormat) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type SignParameters. 
+func (s *SignParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "alg": + err = unpopulate(val, "Algorithm", &s.Algorithm) + delete(rawMsg, key) + case "value": + err = runtime.DecodeByteArray(string(val), &s.Value, runtime.Base64URLFormat) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type UpdateKeyParameters. +func (u UpdateKeyParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "attributes", u.KeyAttributes) + populate(objectMap, "key_ops", u.KeyOps) + populate(objectMap, "release_policy", u.ReleasePolicy) + populate(objectMap, "tags", u.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type UpdateKeyParameters. +func (u *UpdateKeyParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", u, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "attributes": + err = unpopulate(val, "KeyAttributes", &u.KeyAttributes) + delete(rawMsg, key) + case "key_ops": + err = unpopulate(val, "KeyOps", &u.KeyOps) + delete(rawMsg, key) + case "release_policy": + err = unpopulate(val, "ReleasePolicy", &u.ReleasePolicy) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &u.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", u, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type VerifyParameters. +func (v VerifyParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "alg", v.Algorithm) + populateByteArray(objectMap, "digest", v.Digest, runtime.Base64URLFormat) + populateByteArray(objectMap, "value", v.Signature, runtime.Base64URLFormat) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type VerifyParameters. 
+func (v *VerifyParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", v, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "alg": + err = unpopulate(val, "Algorithm", &v.Algorithm) + delete(rawMsg, key) + case "digest": + err = runtime.DecodeByteArray(string(val), &v.Digest, runtime.Base64URLFormat) + delete(rawMsg, key) + case "value": + err = runtime.DecodeByteArray(string(val), &v.Signature, runtime.Base64URLFormat) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", v, err) + } + } + return nil +} + +func populate(m map[string]any, k string, v any) { + if v == nil { + return + } else if azcore.IsNullValue(v) { + m[k] = nil + } else if !reflect.ValueOf(v).IsNil() { + m[k] = v + } +} + +func populateByteArray(m map[string]any, k string, b []byte, f runtime.Base64Encoding) { + if azcore.IsNullValue(b) { + m[k] = nil + } else if len(b) == 0 { + return + } else { + m[k] = runtime.EncodeByteArray(b, f) + } +} + +func unpopulate(data json.RawMessage, fn string, v any) error { + if data == nil { + return nil + } + if err := json.Unmarshal(data, v); err != nil { + return fmt.Errorf("struct field %s: %v", fn, err) + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/platform-matrix.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/platform-matrix.json new file mode 100644 index 00000000..51367ad9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/platform-matrix.json @@ -0,0 +1,17 @@ +{ + "displayNames": { + "@{ enableHsm = $true }": "HSM" + }, + "include": [ + { + "Agent": { + "ubuntu-20.04": { + "OSVmImage": "MMSUbuntu20.04", + "Pool": "azsdk-pool-mms-ubuntu-2004-general" + } + }, + "ArmTemplateParameters": "@{ enableHsm = $true }", + "GoVersion": ["1.18"] + } + ] +} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/response_types.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/response_types.go new file mode 100644 index 00000000..e9fe38e6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/response_types.go @@ -0,0 +1,130 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package azkeys + +// BackupKeyResponse contains the response from method Client.BackupKey. +type BackupKeyResponse struct { + BackupKeyResult +} + +// CreateKeyResponse contains the response from method Client.CreateKey. +type CreateKeyResponse struct { + KeyBundle +} + +// DecryptResponse contains the response from method Client.Decrypt. +type DecryptResponse struct { + KeyOperationResult +} + +// DeleteKeyResponse contains the response from method Client.DeleteKey. +type DeleteKeyResponse struct { + DeletedKey +} + +// EncryptResponse contains the response from method Client.Encrypt. +type EncryptResponse struct { + KeyOperationResult +} + +// GetDeletedKeyResponse contains the response from method Client.GetDeletedKey. 
+type GetDeletedKeyResponse struct { + DeletedKey +} + +// GetKeyResponse contains the response from method Client.GetKey. +type GetKeyResponse struct { + KeyBundle +} + +// GetKeyRotationPolicyResponse contains the response from method Client.GetKeyRotationPolicy. +type GetKeyRotationPolicyResponse struct { + KeyRotationPolicy +} + +// GetRandomBytesResponse contains the response from method Client.GetRandomBytes. +type GetRandomBytesResponse struct { + RandomBytes +} + +// ImportKeyResponse contains the response from method Client.ImportKey. +type ImportKeyResponse struct { + KeyBundle +} + +// ListDeletedKeyPropertiesResponse contains the response from method Client.NewListDeletedKeyPropertiesPager. +type ListDeletedKeyPropertiesResponse struct { + DeletedKeyPropertiesListResult +} + +// ListKeyPropertiesResponse contains the response from method Client.NewListKeyPropertiesPager. +type ListKeyPropertiesResponse struct { + KeyPropertiesListResult +} + +// ListKeyPropertiesVersionsResponse contains the response from method Client.NewListKeyPropertiesVersionsPager. +type ListKeyPropertiesVersionsResponse struct { + KeyPropertiesListResult +} + +// PurgeDeletedKeyResponse contains the response from method Client.PurgeDeletedKey. +type PurgeDeletedKeyResponse struct { + // placeholder for future response values +} + +// RecoverDeletedKeyResponse contains the response from method Client.RecoverDeletedKey. +type RecoverDeletedKeyResponse struct { + KeyBundle +} + +// ReleaseResponse contains the response from method Client.Release. +type ReleaseResponse struct { + KeyReleaseResult +} + +// RestoreKeyResponse contains the response from method Client.RestoreKey. +type RestoreKeyResponse struct { + KeyBundle +} + +// RotateKeyResponse contains the response from method Client.RotateKey. +type RotateKeyResponse struct { + KeyBundle +} + +// SignResponse contains the response from method Client.Sign. +type SignResponse struct { + KeyOperationResult +} + +// UnwrapKeyResponse contains the response from method Client.UnwrapKey. +type UnwrapKeyResponse struct { + KeyOperationResult +} + +// UpdateKeyResponse contains the response from method Client.UpdateKey. +type UpdateKeyResponse struct { + KeyBundle +} + +// UpdateKeyRotationPolicyResponse contains the response from method Client.UpdateKeyRotationPolicy. +type UpdateKeyRotationPolicyResponse struct { + KeyRotationPolicy +} + +// VerifyResponse contains the response from method Client.Verify. +type VerifyResponse struct { + KeyVerifyResult +} + +// WrapKeyResponse contains the response from method Client.WrapKey. +type WrapKeyResponse struct { + KeyOperationResult +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/test-resources-post.ps1 b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/test-resources-post.ps1 new file mode 100644 index 00000000..80f20c0c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/test-resources-post.ps1 @@ -0,0 +1,118 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +# IMPORTANT: Do not invoke this file directly. Please instead run eng/New-TestResources.ps1 from the repository root. + +#Requires -Version 6.0 +#Requires -PSEdition Core + +using namespace System.Security.Cryptography +using namespace System.Security.Cryptography.X509Certificates + +# Use same parameter names as declared in eng/New-TestResources.ps1 (assume validation therein). 
+[CmdletBinding(SupportsShouldProcess = $true, ConfirmImpact = 'Medium')] +param ( + [Parameter()] + [hashtable] $DeploymentOutputs, + + # Captures any arguments from eng/New-TestResources.ps1 not declared here (no parameter errors). + [Parameter(ValueFromRemainingArguments = $true)] + $RemainingArguments +) + +# By default stop for any error. +if (!$PSBoundParameters.ContainsKey('ErrorAction')) { + $ErrorActionPreference = 'Stop' +} + +function Log($Message) { + Write-Host ('{0} - {1}' -f [DateTime]::Now.ToLongTimeString(), $Message) +} + +function New-X509Certificate2([string] $SubjectName) { + + $rsa = [RSA]::Create(2048) + try { + $req = [CertificateRequest]::new( + [string] $SubjectName, + $rsa, + [HashAlgorithmName]::SHA256, + [RSASignaturePadding]::Pkcs1 + ) + + # TODO: Add any KUs necessary to $req.CertificateExtensions + + $NotBefore = [DateTimeOffset]::Now.AddDays(-1) + $NotAfter = $NotBefore.AddDays(365) + + $req.CreateSelfSigned($NotBefore, $NotAfter) + } + finally { + $rsa.Dispose() + } +} + +function Export-X509Certificate2([string] $Path, [X509Certificate2] $Certificate) { + + $Certificate.Export([X509ContentType]::Pfx) | Set-Content $Path -AsByteStream +} + +function Export-X509Certificate2PEM([string] $Path, [X509Certificate2] $Certificate) { + +@" +-----BEGIN CERTIFICATE----- +$([Convert]::ToBase64String($Certificate.RawData, 'InsertLineBreaks')) +-----END CERTIFICATE----- +"@ > $Path + +} + +# Make sure we deployed a Managed HSM. +if (!$DeploymentOutputs['AZURE_MANAGEDHSM_URL']) { + Log "Managed HSM not deployed; skipping activation" + exit +} + +[Uri] $hsmUrl = $DeploymentOutputs['AZURE_MANAGEDHSM_URL'] +$hsmName = $hsmUrl.Host.Substring(0, $hsmUrl.Host.IndexOf('.')) + +Log 'Creating 3 X509 certificates to activate security domain' +$wrappingFiles = foreach ($i in 0..2) { + $certificate = New-X509Certificate2 "CN=$($hsmUrl.Host)" + + $baseName = "$PSScriptRoot\$hsmName-certificate$i" + Export-X509Certificate2 "$baseName.pfx" $certificate + Export-X509Certificate2PEM "$baseName.cer" $certificate + + Resolve-Path "$baseName.cer" +} + +Log "Downloading security domain from '$hsmUrl'" + +$sdPath = "$PSScriptRoot\$hsmName-security-domain.key" +if (Test-Path $sdpath) { + Log "Deleting old security domain: $sdPath" + Remove-Item $sdPath -Force +} + +Export-AzKeyVaultSecurityDomain -Name $hsmName -Quorum 2 -Certificates $wrappingFiles -OutputPath $sdPath -ErrorAction SilentlyContinue -Verbose +if ( !$? ) { + Write-Host $Error[0].Exception + Write-Error $Error[0] + + exit +} + +Log "Security domain downloaded to '$sdPath'; Managed HSM is now active at '$hsmUrl'" + +# Force a sleep to wait for Managed HSM activation to propagate through Cosmos replication. Issue tracked in Azure DevOps. +Log 'Sleeping for 30 seconds to allow activation to propagate...' 
+Start-Sleep -Seconds 30 + +$testApplicationOid = $DeploymentOutputs['CLIENT_OBJECTID'] + +Log "Creating additional required role assignments for '$testApplicationOid'" +$null = New-AzKeyVaultRoleAssignment -HsmName $hsmName -RoleDefinitionName 'Managed HSM Crypto Officer' -ObjectID $testApplicationOid +$null = New-AzKeyVaultRoleAssignment -HsmName $hsmName -RoleDefinitionName 'Managed HSM Crypto User' -ObjectID $testApplicationOid + +Log "Role assignments created for '$testApplicationOid'" \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/test-resources.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/test-resources.json new file mode 100644 index 00000000..abc737a7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/test-resources.json @@ -0,0 +1,296 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "baseName": { + "type": "string", + "defaultValue": "[resourceGroup().name]", + "metadata": { + "description": "The base resource name." + } + }, + "tenantId": { + "type": "string", + "defaultValue": "72f988bf-86f1-41af-91ab-2d7cd011db47", + "metadata": { + "description": "The tenant ID to which the application and resources belong." + } + }, + "testApplicationOid": { + "type": "string", + "metadata": { + "description": "The client OID to grant access to test resources." + } + }, + "provisionerApplicationOid": { + "type": "string", + "metadata": { + "description": "The provisioner OID to grant access to test resources." + } + }, + "location": { + "type": "string", + "defaultValue": "[resourceGroup().location]", + "metadata": { + "description": "The location of the resource. By default, this is the same as the resource group." + } + }, + "hsmLocation": { + "type": "string", + "defaultValue": "uksouth", + "allowedValues": [ + "australiacentral", + "australiaeast", + "canadacentral", + "canadaeast", + "centralindia", + "centralus", + "eastasia", + "eastus", + "eastus2", + "francecentral", + "japaneast", + "koreacentral", + "northcentralus", + "northeurope", + "southafricanorth", + "southcentralus", + "switzerlandnorth", + "switzerlandwest", + "uaenorth", + "uksouth", + "westcentralus", + "westeurope", + "westus", + "westus2", + "westus3" + ], + "metadata": { + "description": "The location of the Managed HSM." + } + }, + "enableHsm": { + "type": "bool", + "defaultValue": false, + "metadata": { + "description": "Whether to enable deployment of Managed HSM. The default is false." + } + }, + "keyVaultSku": { + "type": "string", + "defaultValue": "premium", + "metadata": { + "description": "Key Vault SKU to deploy. The default is 'premium'" + } + }, + "attestationImage": { + "type": "string", + "defaultValue": "keyvault-mock-attestation:latest", + "metadata": { + "description": "The container image name and tag to use for the attestation mock service." 
+ } + } + }, + "variables": { + "attestationFarm": "[concat(parameters('baseName'), 'farm')]", + "attestationSite": "[concat(parameters('baseName'), 'site')]", + "attestationUri": "[concat('DOCKER|azsdkengsys.azurecr.io/', parameters('attestationImage'))]", + "kvApiVersion": "2019-09-01", + "kvName": "[parameters('baseName')]", + "kvAdminDefinitionId": "00482a5a-887f-4fb3-b363-3b7fe8e74483", + "kvAdminAssignmentName": "[guid(resourceGroup().id, variables('kvAdminDefinitionId'), parameters('testApplicationOid'))]", + "hsmApiVersion": "2021-04-01-preview", + "hsmName": "[concat(parameters('baseName'), 'hsm')]", + "mgmtApiVersion": "2019-04-01", + "blobContainerName": "backup", + "primaryAccountName": "[concat(parameters('baseName'), 'prim')]", + "encryption": { + "services": { + "blob": { + "enabled": true + } + }, + "keySource": "Microsoft.Storage" + }, + "networkAcls": { + "bypass": "AzureServices", + "virtualNetworkRules": [], + "ipRules": [], + "defaultAction": "Allow" + } + }, + "resources": [ + { + "type": "Microsoft.KeyVault/vaults", + "apiVersion": "[variables('kvApiVersion')]", + "name": "[variables('kvName')]", + "location": "[parameters('location')]", + "properties": { + "sku": { + "family": "A", + "name": "[parameters('keyVaultSku')]" + }, + "tenantId": "[parameters('tenantId')]", + "enabledForDeployment": false, + "enabledForDiskEncryption": false, + "enabledForTemplateDeployment": false, + "enableSoftDelete": true, + "enableRbacAuthorization": true, + "softDeleteRetentionInDays": 7 + } + }, + { + "type": "Microsoft.Authorization/roleAssignments", + "apiVersion": "2020-04-01-preview", + "name": "[variables('kvAdminAssignmentName')]", + "properties": { + "roleDefinitionId": "[resourceId('Microsoft.Authorization/roleDefinitions', variables('kvAdminDefinitionId'))]", + "principalId": "[parameters('testApplicationOid')]", + "scope": "[resourceGroup().id]" + } + }, + { + "type": "Microsoft.KeyVault/managedHSMs", + "apiVersion": "[variables('hsmApiVersion')]", + "name": "[variables('hsmName')]", + "condition": "[parameters('enableHsm')]", + "location": "[parameters('hsmLocation')]", + "sku": { + "family": "B", + "name": "Standard_B1" + }, + "properties": { + "tenantId": "[parameters('tenantId')]", + "initialAdminObjectIds": "[union(array(parameters('testApplicationOid')), array(parameters('provisionerApplicationOid')))]", + "enablePurgeProtection": false, + "enableSoftDelete": true, + "softDeleteRetentionInDays": 7, + "publicNetworkAccess": "Enabled", + "networkAcls": "[variables('networkAcls')]" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('primaryAccountName')]", + "location": "[parameters('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices", + "apiVersion": "2019-06-01", + "name": "[concat(variables('primaryAccountName'), '/default')]", + "dependsOn": [ + "[resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName'))]" + ], + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "properties": { + "cors": { + "corsRules": [] + }, + "deleteRetentionPolicy": { + "enabled": false + } + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices/containers", + "apiVersion": 
"2019-06-01", + "name": "[concat(variables('primaryAccountName'), '/default/', variables('blobContainerName'))]", + "dependsOn": [ + "[resourceId('Microsoft.Storage/storageAccounts/blobServices', variables('primaryAccountName'), 'default')]", + "[resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName'))]" + ], + "properties": { + "publicAccess": "None" + } + }, + { + + "type": "Microsoft.Web/serverfarms", + "apiVersion": "2020-12-01", + "name": "[variables('attestationFarm')]", + "location": "[parameters('location')]", + "kind": "linux", + "sku": { + "name": "B1" + }, + "properties": { + "reserved": true + } + }, + { + + "type": "Microsoft.Web/sites", + "apiVersion": "2020-12-01", + "name": "[variables('attestationSite')]", + "dependsOn": [ + "[resourceId('Microsoft.Web/serverfarms', variables('attestationFarm'))]" + ], + "location": "[parameters('location')]", + "properties": { + "httpsOnly": true, + "serverFarmId": "[resourceId('Microsoft.Web/serverfarms', variables('attestationFarm'))]", + "siteConfig": { + "name": "[variables('attestationSite')]", + "alwaysOn": true, + "linuxFxVersion": "[variables('attestationUri')]", + "appSettings": [ + { + "name": "WEBSITES_ENABLE_APP_SERVICE_STORAGE", + "value": "false" + } + ] + } + } + } + ], + "outputs": { + "AZURE_KEYVAULT_URL": { + "type": "string", + "value": "[reference(variables('kvName')).vaultUri]" + }, + "AZURE_MANAGEDHSM_URL": { + "type": "string", + "condition": "[parameters('enableHsm')]", + "value": "[reference(variables('hsmName')).hsmUri]" + }, + "KEYVAULT_SKU": { + "type": "string", + "value": "[reference(parameters('baseName')).sku.name]" + }, + "CLIENT_OBJECTID": { + "type": "string", + "value": "[parameters('testApplicationOid')]" + }, + "BLOB_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('primaryAccountName')]" + }, + "BLOB_PRIMARY_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(variables('primaryAccountName'), variables('mgmtApiVersion')).keys[0].value]" + }, + "BLOB_CONTAINER_NAME" : { + "type": "string", + "value": "[variables('blobContainerName')]" + }, + "AZURE_KEYVAULT_ATTESTATION_URL": { + "type": "string", + "value": "[format('https://{0}/', reference(variables('attestationSite')).defaultHostName)]" + } + } +} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/time_unix.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/time_unix.go new file mode 100644 index 00000000..077d3ae4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/time_unix.go @@ -0,0 +1,62 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. 
+ +package azkeys + +import ( + "encoding/json" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "reflect" + "strings" + "time" +) + +type timeUnix time.Time + +func (t timeUnix) MarshalJSON() ([]byte, error) { + return json.Marshal(time.Time(t).Unix()) +} + +func (t *timeUnix) UnmarshalJSON(data []byte) error { + var seconds int64 + if err := json.Unmarshal(data, &seconds); err != nil { + return err + } + *t = timeUnix(time.Unix(seconds, 0)) + return nil +} + +func (t timeUnix) String() string { + return fmt.Sprintf("%d", time.Time(t).Unix()) +} + +func populateTimeUnix(m map[string]any, k string, t *time.Time) { + if t == nil { + return + } else if azcore.IsNullValue(t) { + m[k] = nil + return + } else if reflect.ValueOf(t).IsNil() { + return + } + m[k] = (*timeUnix)(t) +} + +func unpopulateTimeUnix(data json.RawMessage, fn string, t **time.Time) error { + if data == nil || strings.EqualFold(string(data), "null") { + return nil + } + var aux timeUnix + if err := json.Unmarshal(data, &aux); err != nil { + return fmt.Errorf("struct field %s: %v", fn, err) + } + *t = (*time.Time)(&aux) + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/version.go new file mode 100644 index 00000000..0eff0664 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/version.go @@ -0,0 +1,12 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package azkeys + +const ( + moduleName = "azkeys" + version = "v0.12.0" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/CHANGELOG.md new file mode 100644 index 00000000..d91660b5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/CHANGELOG.md @@ -0,0 +1,62 @@ +# Release History + +## 0.8.0 (2023-03-08) + +### Breaking Changes +* Moved to new location + +### Other Changes +* Upgrade dependencies + +## 0.7.1 (2022-11-14) + +### Bugs Fixed +* `KeyVaultChallengePolicy` uses incorrect authentication scope when challenge verification is disabled + +## 0.7.0 (2022-09-20) + +### Breaking Changes +* Added `*KeyVaultChallengePolicyOptions` parameter to `NewKeyVaultChallengePolicy` + +## 0.6.0 (2022-09-12) + +### Breaking Changes +* Verify the challenge resource matches the vault domain. See https://aka.ms/azsdk/blog/vault-uri for more information. +* `ParseID()` no longer appends a trailing slash to vault URLs + +## 0.5.0 (2022-05-12) + +### Breaking Changes +* Removed `ExpiringResource` and its dependencies in favor of shared implementation from `internal/temporal`. + +### Other Changes +* Updated to latest versions of `azcore` and `internal`. + +## 0.4.0 (2022-04-22) + +### Breaking Changes +* Updated `ExpiringResource` and its dependent types to use generics. + +### Other Changes +* Remove reference to `TokenRequestOptions.TenantID` as it's been removed and wasn't working anyways. 
+ +## 0.3.0 (2022-04-04) + +### Features Added +* Adds the `ParseKeyvaultID` function to parse an ID into the Key Vault URL, item name, and item version + +### Breaking Changes +* Updates to azcore v0.23.0 + +## 0.2.1 (2022-01-31) + +### Bugs Fixed +* Avoid retries on terminal failures (#16932) + +## 0.2.0 (2022-01-12) + +### Bugs Fixed +* Fixes a bug with Managed HSMs that prevented correctly authorizing requests. + +## 0.1.0 (2021-11-09) +* This is the initial release of the `internal` library for KeyVault diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/LICENSE.txt new file mode 100644 index 00000000..d1ca00f2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/LICENSE.txt @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/README.md new file mode 100644 index 00000000..8516337c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/README.md @@ -0,0 +1,21 @@ +# Key Vault Internal Module for Go + +This module contains shared code for all the Key Vault SDKs, mainly the challenge authentication policy. + +## Contributing +This project welcomes contributions and suggestions. Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. +For details, visit [https://cla.microsoft.com](https://cla.microsoft.com). + +When you submit a pull request, a CLA-bot will automatically determine whether +you need to provide a CLA and decorate the PR appropriately (e.g., label, +comment). Simply follow the instructions provided by the bot. You will only +need to do this once across all repos using our CLA. + +This project has adopted the +[Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information, see the +[Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) +or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any +additional questions or comments. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/challenge_policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/challenge_policy.go new file mode 100644 index 00000000..f5c8b725 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/challenge_policy.go @@ -0,0 +1,175 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package internal + +import ( + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" +) + +const challengeMatchError = `challenge resource "%s" doesn't match the requested domain. Set DisableChallengeResourceVerification to true in your client options to disable. See https://aka.ms/azsdk/blog/vault-uri for more information` + +type KeyVaultChallengePolicyOptions struct { + // DisableChallengeResourceVerification controls whether the policy requires the + // authentication challenge resource to match the Key Vault or Managed HSM domain + DisableChallengeResourceVerification bool +} + +type keyVaultAuthorizer struct { + // tro is the policy's authentication parameters. These are discovered from an authentication challenge + // elicited ahead of the first client request. + tro policy.TokenRequestOptions + // TODO: move into tro once it has a tenant field (https://github.com/Azure/azure-sdk-for-go/issues/19841) + tenantID string + verifyChallengeResource bool +} + +type reqBody struct { + body io.ReadSeekCloser + contentType string +} + +func NewKeyVaultChallengePolicy(cred azcore.TokenCredential, opts *KeyVaultChallengePolicyOptions) policy.Policy { + if opts == nil { + opts = &KeyVaultChallengePolicyOptions{} + } + kv := keyVaultAuthorizer{ + verifyChallengeResource: !opts.DisableChallengeResourceVerification, + } + return runtime.NewBearerTokenPolicy(cred, nil, &policy.BearerTokenOptions{ + AuthorizationHandler: policy.AuthorizationHandler{ + OnRequest: kv.authorize, + OnChallenge: kv.authorizeOnChallenge, + }, + }) +} + +func (k *keyVaultAuthorizer) authorize(req *policy.Request, authNZ func(policy.TokenRequestOptions) error) error { + if len(k.tro.Scopes) == 0 || k.tenantID == "" { + if body := req.Body(); body != nil { + // We don't know the scope or tenant ID because we haven't seen a challenge yet. We elicit one now by sending + // the request without authorization, first removing its body, if any. authorizeOnChallenge will reattach the + // body, authorize the request, and send it again. + rb := reqBody{body, req.Raw().Header.Get("content-type")} + req.SetOperationValue(rb) + if err := req.SetBody(nil, ""); err != nil { + return err + } + } + // returning nil indicates the bearer token policy should send the request + return nil + } + // else we know the auth parameters and can authorize the request as normal + return authNZ(k.tro) +} + +func (k *keyVaultAuthorizer) authorizeOnChallenge(req *policy.Request, res *http.Response, authNZ func(policy.TokenRequestOptions) error) error { + // parse the challenge + if err := k.updateTokenRequestOptions(res, req.Raw()); err != nil { + return err + } + // reattach the request's original body, if it was removed by authorize(). 
If a bug prevents recovering + // the body, this policy will send the request without it and get a 400 response from Key Vault. + var rb reqBody + if req.OperationValue(&rb) { + if err := req.SetBody(rb.body, rb.contentType); err != nil { + return err + } + } + // authenticate with the parameters supplied by Key Vault, authorize the request, send it again + return authNZ(k.tro) +} + +// parses Tenant ID from auth challenge +// https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000 +func parseTenant(url string) string { + if url == "" { + return "" + } + parts := strings.Split(url, "/") + tenant := parts[3] + tenant = strings.ReplaceAll(tenant, ",", "") + return tenant +} + +type challengePolicyError struct { + err error +} + +func (c *challengePolicyError) Error() string { + return c.err.Error() +} + +func (*challengePolicyError) NonRetriable() { + // marker method +} + +func (c *challengePolicyError) Unwrap() error { + return c.err +} + +var _ errorinfo.NonRetriable = (*challengePolicyError)(nil) + +// updateTokenRequestOptions parses authentication parameters from Key Vault's challenge +func (k *keyVaultAuthorizer) updateTokenRequestOptions(resp *http.Response, req *http.Request) error { + authHeader := resp.Header.Get("WWW-Authenticate") + if authHeader == "" { + return &challengePolicyError{err: errors.New("response has no WWW-Authenticate header for challenge authentication")} + } + + // Strip down to auth and resource + // Format is "Bearer authorization=\"\" resource=\"\"" OR + // "Bearer authorization=\"\" scope=\"\" resource=\"\"" + authHeader = strings.ReplaceAll(authHeader, "Bearer ", "") + + parts := strings.Split(authHeader, " ") + + vals := map[string]string{} + for _, part := range parts { + subParts := strings.Split(part, "=") + if len(subParts) == 2 { + stripped := strings.ReplaceAll(subParts[1], "\"", "") + stripped = strings.TrimSuffix(stripped, ",") + vals[subParts[0]] = stripped + } + } + + k.tenantID = parseTenant(vals["authorization"]) + scope := "" + if v, ok := vals["scope"]; ok { + scope = v + } else if v, ok := vals["resource"]; ok { + scope = v + } + if scope == "" { + return &challengePolicyError{err: errors.New("could not find a valid resource in the WWW-Authenticate header")} + } + if k.verifyChallengeResource { + // the challenge resource's host must match the requested vault's host + parsed, err := url.Parse(scope) + if err != nil { + return &challengePolicyError{err: fmt.Errorf(`invalid challenge resource "%s": %v`, scope, err)} + } + if !strings.HasSuffix(req.URL.Host, "."+parsed.Host) { + return &challengePolicyError{err: fmt.Errorf(challengeMatchError, scope)} + } + } + if !strings.HasSuffix(scope, "/.default") { + scope += "/.default" + } + k.tro.Scopes = []string{scope} + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/ci.securitykeyvault.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/ci.securitykeyvault.yml new file mode 100644 index 00000000..2f8b8e1a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/ci.securitykeyvault.yml @@ -0,0 +1,28 @@ +# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file. 
+trigger: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/security/keyvault/internal + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/security/keyvault/internal + +stages: +- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + ServiceDirectory: 'security/keyvault/internal' + RunLiveTests: false diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/constants.go new file mode 100644 index 00000000..610f1544 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/constants.go @@ -0,0 +1,11 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package internal + +const ( + version = "v0.8.0" //nolint +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/doc.go new file mode 100644 index 00000000..d8f93492 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/doc.go @@ -0,0 +1,7 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package internal diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/parse.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/parse.go new file mode 100644 index 00000000..8511832d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/parse.go @@ -0,0 +1,37 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +package internal + +import ( + "fmt" + "net/url" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" +) + +// ParseID parses "https://myvaultname.vault.azure.net/keys/key1053998307/b86c2e6ad9054f4abf69cc185b99aa60" +// into "https://myvaultname.managedhsm.azure.net/", "key1053998307", and "b86c2e6ad9054f4abf69cc185b99aa60" +func ParseID(id *string) (*string, *string, *string) { + if id == nil { + return nil, nil, nil + } + parsed, err := url.Parse(*id) + if err != nil { + return nil, nil, nil + } + + url := fmt.Sprintf("%s://%s", parsed.Scheme, parsed.Host) + split := strings.Split(strings.TrimPrefix(parsed.Path, "/"), "/") + if len(split) < 3 { + if len(split) == 2 { + return &url, to.Ptr(split[1]), nil + } + return &url, nil, nil + } + + return &url, to.Ptr(split[1]), to.Ptr(split[2]) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/auth/auth.go b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/auth/auth.go deleted file mode 100644 index 1f183448..00000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/auth/auth.go +++ /dev/null @@ -1,65 +0,0 @@ -package auth - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. 
- -import ( - "os" - - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/azure/auth" -) - -// NewAuthorizerFromEnvironment creates a keyvault dataplane Authorizer configured from environment variables in the order: -// 1. Client credentials -// 2. Client certificate -// 3. Username password -// 4. MSI -func NewAuthorizerFromEnvironment() (autorest.Authorizer, error) { - res, err := getResource() - if err != nil { - return nil, err - } - return auth.NewAuthorizerFromEnvironmentWithResource(res) -} - -// NewAuthorizerFromFile creates a keyvault dataplane Authorizer configured from a configuration file. -// The path to the configuration file must be specified in the AZURE_AUTH_LOCATION environment variable. -func NewAuthorizerFromFile() (autorest.Authorizer, error) { - res, err := getResource() - if err != nil { - return nil, err - } - return auth.NewAuthorizerFromFileWithResource(res) -} - -// NewAuthorizerFromCLI creates a keyvault dataplane Authorizer configured from Azure CLI 2.0 for local development scenarios. -func NewAuthorizerFromCLI() (autorest.Authorizer, error) { - res, err := getResource() - if err != nil { - return nil, err - } - return auth.NewAuthorizerFromCLIWithResource(res) -} - -func getResource() (string, error) { - var env azure.Environment - - if envName := os.Getenv("AZURE_ENVIRONMENT"); envName == "" { - env = azure.PublicCloud - } else { - var err error - env, err = azure.EnvironmentFromName(envName) - if err != nil { - return "", err - } - } - - resource := os.Getenv("AZURE_KEYVAULT_RESOURCE") - if resource == "" { - resource = env.ResourceIdentifiers.KeyVault - } - - return resource, nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/CHANGELOG.md deleted file mode 100644 index 6c701c1c..00000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/CHANGELOG.md +++ /dev/null @@ -1,26 +0,0 @@ -# Change History - -## Additive Changes - -### New Funcs - -1. BackupCertificateResult.MarshalJSON() ([]byte, error) -1. BackupKeyResult.MarshalJSON() ([]byte, error) -1. BackupSecretResult.MarshalJSON() ([]byte, error) -1. BackupStorageResult.MarshalJSON() ([]byte, error) -1. CertificateIssuerListResult.MarshalJSON() ([]byte, error) -1. CertificateListResult.MarshalJSON() ([]byte, error) -1. DeletedCertificateListResult.MarshalJSON() ([]byte, error) -1. DeletedKeyListResult.MarshalJSON() ([]byte, error) -1. DeletedSasDefinitionListResult.MarshalJSON() ([]byte, error) -1. DeletedSecretListResult.MarshalJSON() ([]byte, error) -1. DeletedStorageListResult.MarshalJSON() ([]byte, error) -1. Error.MarshalJSON() ([]byte, error) -1. ErrorType.MarshalJSON() ([]byte, error) -1. KeyListResult.MarshalJSON() ([]byte, error) -1. KeyOperationResult.MarshalJSON() ([]byte, error) -1. KeyVerifyResult.MarshalJSON() ([]byte, error) -1. PendingCertificateSigningRequestResult.MarshalJSON() ([]byte, error) -1. SasDefinitionListResult.MarshalJSON() ([]byte, error) -1. SecretListResult.MarshalJSON() ([]byte, error) -1. 
StorageListResult.MarshalJSON() ([]byte, error) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/client.go deleted file mode 100644 index e101ccbd..00000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/client.go +++ /dev/null @@ -1,7313 +0,0 @@ -// Package keyvault implements the Azure ARM Keyvault service API version 7.1. -// -// The key vault client performs cryptographic key operations and vault operations against the Key Vault service. -package keyvault - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// BaseClient is the base client for Keyvault. -type BaseClient struct { - autorest.Client -} - -// New creates an instance of the BaseClient client. -func New() BaseClient { - return NewWithoutDefaults() -} - -// NewWithoutDefaults creates an instance of the BaseClient client. -func NewWithoutDefaults() BaseClient { - return BaseClient{ - Client: autorest.NewClientWithUserAgent(UserAgent()), - } -} - -// BackupCertificate requests that a backup of the specified certificate be downloaded to the client. All versions of -// the certificate will be downloaded. This operation requires the certificates/backup permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate. -func (client BaseClient) BackupCertificate(ctx context.Context, vaultBaseURL string, certificateName string) (result BackupCertificateResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.BackupCertificate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.BackupCertificatePreparer(ctx, vaultBaseURL, certificateName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupCertificate", nil, "Failure preparing request") - return - } - - resp, err := client.BackupCertificateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupCertificate", resp, "Failure sending request") - return - } - - result, err = client.BackupCertificateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupCertificate", resp, "Failure responding to request") - return - } - - return -} - -// BackupCertificatePreparer prepares the BackupCertificate request. 
-func (client BaseClient) BackupCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/backup", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// BackupCertificateSender sends the BackupCertificate request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) BackupCertificateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// BackupCertificateResponder handles the response to the BackupCertificate request. The method always -// closes the http.Response Body. -func (client BaseClient) BackupCertificateResponder(resp *http.Response) (result BackupCertificateResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// BackupKey the Key Backup operation exports a key from Azure Key Vault in a protected form. Note that this operation -// does NOT return key material in a form that can be used outside the Azure Key Vault system, the returned key -// material is either protected to a Azure Key Vault HSM or to Azure Key Vault itself. The intent of this operation is -// to allow a client to GENERATE a key in one Azure Key Vault instance, BACKUP the key, and then RESTORE it into -// another Azure Key Vault instance. The BACKUP operation may be used to export, in protected form, any key type from -// Azure Key Vault. Individual versions of a key cannot be backed up. BACKUP / RESTORE can be performed within -// geographical boundaries only; meaning that a BACKUP from one geographical area cannot be restored to another -// geographical area. For example, a backup from the US geographical area cannot be restored in an EU geographical -// area. This operation requires the key/backup permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key. 
-func (client BaseClient) BackupKey(ctx context.Context, vaultBaseURL string, keyName string) (result BackupKeyResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.BackupKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.BackupKeyPreparer(ctx, vaultBaseURL, keyName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupKey", nil, "Failure preparing request") - return - } - - resp, err := client.BackupKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupKey", resp, "Failure sending request") - return - } - - result, err = client.BackupKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupKey", resp, "Failure responding to request") - return - } - - return -} - -// BackupKeyPreparer prepares the BackupKey request. -func (client BaseClient) BackupKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}/backup", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// BackupKeySender sends the BackupKey request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) BackupKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// BackupKeyResponder handles the response to the BackupKey request. The method always -// closes the http.Response Body. -func (client BaseClient) BackupKeyResponder(resp *http.Response) (result BackupKeyResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// BackupSecret requests that a backup of the specified secret be downloaded to the client. All versions of the secret -// will be downloaded. This operation requires the secrets/backup permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// secretName - the name of the secret. 
-func (client BaseClient) BackupSecret(ctx context.Context, vaultBaseURL string, secretName string) (result BackupSecretResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.BackupSecret") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.BackupSecretPreparer(ctx, vaultBaseURL, secretName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupSecret", nil, "Failure preparing request") - return - } - - resp, err := client.BackupSecretSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupSecret", resp, "Failure sending request") - return - } - - result, err = client.BackupSecretResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupSecret", resp, "Failure responding to request") - return - } - - return -} - -// BackupSecretPreparer prepares the BackupSecret request. -func (client BaseClient) BackupSecretPreparer(ctx context.Context, vaultBaseURL string, secretName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "secret-name": autorest.Encode("path", secretName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/secrets/{secret-name}/backup", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// BackupSecretSender sends the BackupSecret request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) BackupSecretSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// BackupSecretResponder handles the response to the BackupSecret request. The method always -// closes the http.Response Body. -func (client BaseClient) BackupSecretResponder(resp *http.Response) (result BackupSecretResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// BackupStorageAccount requests that a backup of the specified storage account be downloaded to the client. This -// operation requires the storage/backup permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. 
-func (client BaseClient) BackupStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string) (result BackupStorageResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.BackupStorageAccount") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.BackupStorageAccountPreparer(ctx, vaultBaseURL, storageAccountName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupStorageAccount", nil, "Failure preparing request") - return - } - - resp, err := client.BackupStorageAccountSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupStorageAccount", resp, "Failure sending request") - return - } - - result, err = client.BackupStorageAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupStorageAccount", resp, "Failure responding to request") - return - } - - return -} - -// BackupStorageAccountPreparer prepares the BackupStorageAccount request. -func (client BaseClient) BackupStorageAccountPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/storage/{storage-account-name}/backup", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// BackupStorageAccountSender sends the BackupStorageAccount request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) BackupStorageAccountSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// BackupStorageAccountResponder handles the response to the BackupStorageAccount request. The method always -// closes the http.Response Body. -func (client BaseClient) BackupStorageAccountResponder(resp *http.Response) (result BackupStorageResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// CreateCertificate if this is the first version, the certificate resource is created. This operation requires the -// certificates/create permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate. -// parameters - the parameters to create a certificate. 
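A sketch of a CreateCertificate call shaped to satisfy the client-side validation enforced by the implementation that follows (a CertificatePolicy whose X509CertificateProperties carries a non-negative ValidityInMonths, and a certificate name matching ^[0-9a-zA-Z-]+$). The IssuerParameters/Subject/Status field names and the "Self" issuer are assumptions; the pointer helpers come from the go-autorest to package.

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault"
	"github.com/Azure/go-autorest/autorest/to"
)

// createSelfSignedCertificate starts a certificate creation operation with an
// inline policy; the call returns a CertificateOperation describing progress.
func createSelfSignedCertificate(ctx context.Context, client keyvault.BaseClient, vaultURL string) error {
	params := keyvault.CertificateCreateParameters{
		CertificatePolicy: &keyvault.CertificatePolicy{
			IssuerParameters: &keyvault.IssuerParameters{Name: to.StringPtr("Self")},
			X509CertificateProperties: &keyvault.X509CertificateProperties{
				Subject:          to.StringPtr("CN=example.contoso.com"),
				ValidityInMonths: to.Int32Ptr(12), // must be >= 0 per the validation chain below
			},
		},
	}

	op, err := client.CreateCertificate(ctx, vaultURL, "my-cert", params)
	if err != nil {
		return err
	}
	if op.Status != nil {
		fmt.Println("creation status:", *op.Status)
	}
	return nil
}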
-func (client BaseClient) CreateCertificate(ctx context.Context, vaultBaseURL string, certificateName string, parameters CertificateCreateParameters) (result CertificateOperation, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.CreateCertificate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: certificateName, - Constraints: []validation.Constraint{{Target: "certificateName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z-]+$`, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.CertificatePolicy", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.CertificatePolicy.X509CertificateProperties", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.CertificatePolicy.X509CertificateProperties.ValidityInMonths", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.CertificatePolicy.X509CertificateProperties.ValidityInMonths", Name: validation.InclusiveMinimum, Rule: int64(0), Chain: nil}}}, - }}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "CreateCertificate", err.Error()) - } - - req, err := client.CreateCertificatePreparer(ctx, vaultBaseURL, certificateName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "CreateCertificate", nil, "Failure preparing request") - return - } - - resp, err := client.CreateCertificateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "CreateCertificate", resp, "Failure sending request") - return - } - - result, err = client.CreateCertificateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "CreateCertificate", resp, "Failure responding to request") - return - } - - return -} - -// CreateCertificatePreparer prepares the CreateCertificate request. -func (client BaseClient) CreateCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string, parameters CertificateCreateParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/create", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateCertificateSender sends the CreateCertificate request. The method will close the -// http.Response Body if it receives an error. 
-func (client BaseClient) CreateCertificateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// CreateCertificateResponder handles the response to the CreateCertificate request. The method always -// closes the http.Response Body. -func (client BaseClient) CreateCertificateResponder(resp *http.Response) (result CertificateOperation, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// CreateKey the create key operation can be used to create any key type in Azure Key Vault. If the named key already -// exists, Azure Key Vault creates a new version of the key. It requires the keys/create permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name for the new key. The system will generate the version name for the new key. -// parameters - the parameters to create a key. -func (client BaseClient) CreateKey(ctx context.Context, vaultBaseURL string, keyName string, parameters KeyCreateParameters) (result KeyBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.CreateKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: keyName, - Constraints: []validation.Constraint{{Target: "keyName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z-]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "CreateKey", err.Error()) - } - - req, err := client.CreateKeyPreparer(ctx, vaultBaseURL, keyName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "CreateKey", nil, "Failure preparing request") - return - } - - resp, err := client.CreateKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "CreateKey", resp, "Failure sending request") - return - } - - result, err = client.CreateKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "CreateKey", resp, "Failure responding to request") - return - } - - return -} - -// CreateKeyPreparer prepares the CreateKey request. -func (client BaseClient) CreateKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string, parameters KeyCreateParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}/create", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateKeySender sends the CreateKey request. 
The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) CreateKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// CreateKeyResponder handles the response to the CreateKey request. The method always -// closes the http.Response Body. -func (client BaseClient) CreateKeyResponder(resp *http.Response) (result KeyBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Decrypt the DECRYPT operation decrypts a well-formed block of ciphertext using the target encryption key and -// specified algorithm. This operation is the reverse of the ENCRYPT operation; only a single block of data may be -// decrypted, the size of this block is dependent on the target key and the algorithm to be used. The DECRYPT operation -// applies to asymmetric and symmetric keys stored in Azure Key Vault since it uses the private portion of the key. -// This operation requires the keys/decrypt permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key. -// keyVersion - the version of the key. -// parameters - the parameters for the decryption operation. -func (client BaseClient) Decrypt(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters) (result KeyOperationResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.Decrypt") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Value", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "Decrypt", err.Error()) - } - - req, err := client.DecryptPreparer(ctx, vaultBaseURL, keyName, keyVersion, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Decrypt", nil, "Failure preparing request") - return - } - - resp, err := client.DecryptSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Decrypt", resp, "Failure sending request") - return - } - - result, err = client.DecryptResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Decrypt", resp, "Failure responding to request") - return - } - - return -} - -// DecryptPreparer prepares the Decrypt request. 
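A sketch tying together the CreateKey and Decrypt operations above with their Encrypt counterpart documented later in this file, assuming a configured client. The JSONWebKeyType and JSONWebKeyEncryptionAlgorithm conversions, the Result field on KeyOperationResult, the unpadded base64url encoding of Value, and passing an empty key version to target the latest version are assumptions for illustration.

package main

import (
	"context"
	"encoding/base64"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault"
	"github.com/Azure/go-autorest/autorest/to"
)

// roundTrip creates an RSA key, encrypts a small payload with it, then
// decrypts the ciphertext again; octet values are exchanged as unpadded
// base64url strings.
func roundTrip(ctx context.Context, client keyvault.BaseClient, vaultURL string) error {
	if _, err := client.CreateKey(ctx, vaultURL, "my-key", keyvault.KeyCreateParameters{
		Kty:     keyvault.JSONWebKeyType("RSA"),
		KeySize: to.Int32Ptr(2048),
	}); err != nil {
		return err
	}

	alg := keyvault.JSONWebKeyEncryptionAlgorithm("RSA-OAEP-256")
	plaintext := base64.RawURLEncoding.EncodeToString([]byte("attack at dawn"))

	enc, err := client.Encrypt(ctx, vaultURL, "my-key", "", keyvault.KeyOperationsParameters{
		Algorithm: alg,
		Value:     &plaintext, // Value is required; nil fails client-side validation
	})
	if err != nil {
		return err
	}

	dec, err := client.Decrypt(ctx, vaultURL, "my-key", "", keyvault.KeyOperationsParameters{
		Algorithm: alg,
		Value:     enc.Result,
	})
	if err != nil {
		return err
	}

	raw, err := base64.RawURLEncoding.DecodeString(*dec.Result)
	if err != nil {
		return err
	}
	fmt.Println("decrypted:", string(raw))
	return nil
}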
-func (client BaseClient) DecryptPreparer(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - "key-version": autorest.Encode("path", keyVersion), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}/{key-version}/decrypt", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DecryptSender sends the Decrypt request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) DecryptSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// DecryptResponder handles the response to the Decrypt request. The method always -// closes the http.Response Body. -func (client BaseClient) DecryptResponder(resp *http.Response) (result KeyOperationResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// DeleteCertificate deletes all versions of a certificate object along with its associated policy. Delete certificate -// cannot be used to remove individual versions of a certificate object. This operation requires the -// certificates/delete permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate. -func (client BaseClient) DeleteCertificate(ctx context.Context, vaultBaseURL string, certificateName string) (result DeletedCertificateBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteCertificate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeleteCertificatePreparer(ctx, vaultBaseURL, certificateName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificate", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteCertificateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificate", resp, "Failure sending request") - return - } - - result, err = client.DeleteCertificateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificate", resp, "Failure responding to request") - return - } - - return -} - -// DeleteCertificatePreparer prepares the DeleteCertificate request. 
-func (client BaseClient) DeleteCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteCertificateSender sends the DeleteCertificate request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) DeleteCertificateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// DeleteCertificateResponder handles the response to the DeleteCertificate request. The method always -// closes the http.Response Body. -func (client BaseClient) DeleteCertificateResponder(resp *http.Response) (result DeletedCertificateBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// DeleteCertificateContacts deletes the certificate contacts for a specified key vault certificate. This operation -// requires the certificates/managecontacts permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -func (client BaseClient) DeleteCertificateContacts(ctx context.Context, vaultBaseURL string) (result Contacts, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteCertificateContacts") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeleteCertificateContactsPreparer(ctx, vaultBaseURL) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateContacts", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteCertificateContactsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateContacts", resp, "Failure sending request") - return - } - - result, err = client.DeleteCertificateContactsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateContacts", resp, "Failure responding to request") - return - } - - return -} - -// DeleteCertificateContactsPreparer prepares the DeleteCertificateContacts request. 
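A sketch of contact management combining DeleteCertificateContacts above with the GetCertificateContacts operation defined later in this file, assuming a configured client. The Contacts.ContactList and Contact.EmailAddress field names are assumptions.

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault"
)

// clearCertificateContacts lists the vault's notification contacts and then
// removes the whole contacts resource (certificates/managecontacts permission).
func clearCertificateContacts(ctx context.Context, client keyvault.BaseClient, vaultURL string) error {
	contacts, err := client.GetCertificateContacts(ctx, vaultURL)
	if err != nil {
		return err
	}
	if contacts.ContactList != nil {
		for _, c := range *contacts.ContactList {
			if c.EmailAddress != nil {
				fmt.Println("notifying:", *c.EmailAddress)
			}
		}
	}

	_, err = client.DeleteCertificateContacts(ctx, vaultURL)
	return err
}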
-func (client BaseClient) DeleteCertificateContactsPreparer(ctx context.Context, vaultBaseURL string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/certificates/contacts"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteCertificateContactsSender sends the DeleteCertificateContacts request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) DeleteCertificateContactsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// DeleteCertificateContactsResponder handles the response to the DeleteCertificateContacts request. The method always -// closes the http.Response Body. -func (client BaseClient) DeleteCertificateContactsResponder(resp *http.Response) (result Contacts, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// DeleteCertificateIssuer the DeleteCertificateIssuer operation permanently removes the specified certificate issuer -// from the vault. This operation requires the certificates/manageissuers/deleteissuers permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// issuerName - the name of the issuer. -func (client BaseClient) DeleteCertificateIssuer(ctx context.Context, vaultBaseURL string, issuerName string) (result IssuerBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteCertificateIssuer") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeleteCertificateIssuerPreparer(ctx, vaultBaseURL, issuerName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateIssuer", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteCertificateIssuerSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateIssuer", resp, "Failure sending request") - return - } - - result, err = client.DeleteCertificateIssuerResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateIssuer", resp, "Failure responding to request") - return - } - - return -} - -// DeleteCertificateIssuerPreparer prepares the DeleteCertificateIssuer request. 
-func (client BaseClient) DeleteCertificateIssuerPreparer(ctx context.Context, vaultBaseURL string, issuerName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "issuer-name": autorest.Encode("path", issuerName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/issuers/{issuer-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteCertificateIssuerSender sends the DeleteCertificateIssuer request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) DeleteCertificateIssuerSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// DeleteCertificateIssuerResponder handles the response to the DeleteCertificateIssuer request. The method always -// closes the http.Response Body. -func (client BaseClient) DeleteCertificateIssuerResponder(resp *http.Response) (result IssuerBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// DeleteCertificateOperation deletes the creation operation for a specified certificate that is in the process of -// being created. The certificate is no longer created. This operation requires the certificates/update permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate. -func (client BaseClient) DeleteCertificateOperation(ctx context.Context, vaultBaseURL string, certificateName string) (result CertificateOperation, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteCertificateOperation") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeleteCertificateOperationPreparer(ctx, vaultBaseURL, certificateName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateOperation", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteCertificateOperationSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateOperation", resp, "Failure sending request") - return - } - - result, err = client.DeleteCertificateOperationResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateOperation", resp, "Failure responding to request") - return - } - - return -} - -// DeleteCertificateOperationPreparer prepares the DeleteCertificateOperation request. 
-func (client BaseClient) DeleteCertificateOperationPreparer(ctx context.Context, vaultBaseURL string, certificateName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/pending", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteCertificateOperationSender sends the DeleteCertificateOperation request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) DeleteCertificateOperationSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// DeleteCertificateOperationResponder handles the response to the DeleteCertificateOperation request. The method always -// closes the http.Response Body. -func (client BaseClient) DeleteCertificateOperationResponder(resp *http.Response) (result CertificateOperation, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// DeleteKey the delete key operation cannot be used to remove individual versions of a key. This operation removes the -// cryptographic material associated with the key, which means the key is not usable for Sign/Verify, Wrap/Unwrap or -// Encrypt/Decrypt operations. This operation requires the keys/delete permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key to delete. -func (client BaseClient) DeleteKey(ctx context.Context, vaultBaseURL string, keyName string) (result DeletedKeyBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeleteKeyPreparer(ctx, vaultBaseURL, keyName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteKey", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteKey", resp, "Failure sending request") - return - } - - result, err = client.DeleteKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteKey", resp, "Failure responding to request") - return - } - - return -} - -// DeleteKeyPreparer prepares the DeleteKey request. 
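A sketch of the DeleteKey operation documented above, assuming a configured client; the RecoveryID field on DeletedKeyBundle (populated on soft-delete enabled vaults) is an assumption.

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault"
)

// deleteKey removes every version of a key; individual versions cannot be
// deleted, and the key is no longer usable for any cryptographic operation.
func deleteKey(ctx context.Context, client keyvault.BaseClient, vaultURL, keyName string) error {
	deleted, err := client.DeleteKey(ctx, vaultURL, keyName)
	if err != nil {
		return err
	}
	if deleted.RecoveryID != nil {
		// With soft delete, the deleted key can later be recovered or purged.
		fmt.Println("recovery ID:", *deleted.RecoveryID)
	}
	return nil
}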
-func (client BaseClient) DeleteKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteKeySender sends the DeleteKey request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) DeleteKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// DeleteKeyResponder handles the response to the DeleteKey request. The method always -// closes the http.Response Body. -func (client BaseClient) DeleteKeyResponder(resp *http.Response) (result DeletedKeyBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// DeleteSasDefinition deletes a SAS definition from a specified storage account. This operation requires the -// storage/deletesas permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. -// sasDefinitionName - the name of the SAS definition. 
-func (client BaseClient) DeleteSasDefinition(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (result DeletedSasDefinitionBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteSasDefinition") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, - {TargetValue: sasDefinitionName, - Constraints: []validation.Constraint{{Target: "sasDefinitionName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "DeleteSasDefinition", err.Error()) - } - - req, err := client.DeleteSasDefinitionPreparer(ctx, vaultBaseURL, storageAccountName, sasDefinitionName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteSasDefinition", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSasDefinitionSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteSasDefinition", resp, "Failure sending request") - return - } - - result, err = client.DeleteSasDefinitionResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteSasDefinition", resp, "Failure responding to request") - return - } - - return -} - -// DeleteSasDefinitionPreparer prepares the DeleteSasDefinition request. -func (client BaseClient) DeleteSasDefinitionPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "sas-definition-name": autorest.Encode("path", sasDefinitionName), - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/storage/{storage-account-name}/sas/{sas-definition-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSasDefinitionSender sends the DeleteSasDefinition request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) DeleteSasDefinitionSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// DeleteSasDefinitionResponder handles the response to the DeleteSasDefinition request. The method always -// closes the http.Response Body. 
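The storage operations validate their name arguments client-side against ^[0-9a-zA-Z]+$ before any request is prepared, as the DeleteSasDefinition implementation above shows. A sketch, assuming a configured client; the account and SAS definition names are illustrative.

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault"
)

// deleteSasDefinition shows the client-side validation: a hyphenated storage
// account name never reaches the service and fails with a validation error.
func deleteSasDefinition(ctx context.Context, client keyvault.BaseClient, vaultURL string) {
	if _, err := client.DeleteSasDefinition(ctx, vaultURL, "my-storage-account", "readonly"); err != nil {
		fmt.Println("rejected before sending:", err)
	}

	// Alphanumeric names pass validation and the request is sent to the vault.
	if _, err := client.DeleteSasDefinition(ctx, vaultURL, "mystorageaccount", "readonly"); err != nil {
		fmt.Println("delete failed:", err)
	}
}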
-func (client BaseClient) DeleteSasDefinitionResponder(resp *http.Response) (result DeletedSasDefinitionBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// DeleteSecret the DELETE operation applies to any secret stored in Azure Key Vault. DELETE cannot be applied to an -// individual version of a secret. This operation requires the secrets/delete permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// secretName - the name of the secret. -func (client BaseClient) DeleteSecret(ctx context.Context, vaultBaseURL string, secretName string) (result DeletedSecretBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteSecret") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeleteSecretPreparer(ctx, vaultBaseURL, secretName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteSecret", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSecretSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteSecret", resp, "Failure sending request") - return - } - - result, err = client.DeleteSecretResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteSecret", resp, "Failure responding to request") - return - } - - return -} - -// DeleteSecretPreparer prepares the DeleteSecret request. -func (client BaseClient) DeleteSecretPreparer(ctx context.Context, vaultBaseURL string, secretName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "secret-name": autorest.Encode("path", secretName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/secrets/{secret-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSecretSender sends the DeleteSecret request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) DeleteSecretSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// DeleteSecretResponder handles the response to the DeleteSecret request. The method always -// closes the http.Response Body. -func (client BaseClient) DeleteSecretResponder(resp *http.Response) (result DeletedSecretBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// DeleteStorageAccount deletes a storage account. This operation requires the storage/delete permission. 
-// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. -func (client BaseClient) DeleteStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string) (result DeletedStorageBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteStorageAccount") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "DeleteStorageAccount", err.Error()) - } - - req, err := client.DeleteStorageAccountPreparer(ctx, vaultBaseURL, storageAccountName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteStorageAccount", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteStorageAccountSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteStorageAccount", resp, "Failure sending request") - return - } - - result, err = client.DeleteStorageAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteStorageAccount", resp, "Failure responding to request") - return - } - - return -} - -// DeleteStorageAccountPreparer prepares the DeleteStorageAccount request. -func (client BaseClient) DeleteStorageAccountPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/storage/{storage-account-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteStorageAccountSender sends the DeleteStorageAccount request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) DeleteStorageAccountSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// DeleteStorageAccountResponder handles the response to the DeleteStorageAccount request. The method always -// closes the http.Response Body. -func (client BaseClient) DeleteStorageAccountResponder(resp *http.Response) (result DeletedStorageBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Encrypt the ENCRYPT operation encrypts an arbitrary sequence of bytes using an encryption key that is stored in -// Azure Key Vault. 
Note that the ENCRYPT operation only supports a single block of data, the size of which is -// dependent on the target key and the encryption algorithm to be used. The ENCRYPT operation is only strictly -// necessary for symmetric keys stored in Azure Key Vault since protection with an asymmetric key can be performed -// using public portion of the key. This operation is supported for asymmetric keys as a convenience for callers that -// have a key-reference but do not have access to the public key material. This operation requires the keys/encrypt -// permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key. -// keyVersion - the version of the key. -// parameters - the parameters for the encryption operation. -func (client BaseClient) Encrypt(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters) (result KeyOperationResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.Encrypt") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Value", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "Encrypt", err.Error()) - } - - req, err := client.EncryptPreparer(ctx, vaultBaseURL, keyName, keyVersion, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Encrypt", nil, "Failure preparing request") - return - } - - resp, err := client.EncryptSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Encrypt", resp, "Failure sending request") - return - } - - result, err = client.EncryptResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Encrypt", resp, "Failure responding to request") - return - } - - return -} - -// EncryptPreparer prepares the Encrypt request. -func (client BaseClient) EncryptPreparer(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - "key-version": autorest.Encode("path", keyVersion), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}/{key-version}/encrypt", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// EncryptSender sends the Encrypt request. The method will close the -// http.Response Body if it receives an error. 
-func (client BaseClient) EncryptSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// EncryptResponder handles the response to the Encrypt request. The method always -// closes the http.Response Body. -func (client BaseClient) EncryptResponder(resp *http.Response) (result KeyOperationResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetCertificate gets information about a specific certificate. This operation requires the certificates/get -// permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate in the given vault. -// certificateVersion - the version of the certificate. This URI fragment is optional. If not specified, the -// latest version of the certificate is returned. -func (client BaseClient) GetCertificate(ctx context.Context, vaultBaseURL string, certificateName string, certificateVersion string) (result CertificateBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetCertificatePreparer(ctx, vaultBaseURL, certificateName, certificateVersion) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificate", nil, "Failure preparing request") - return - } - - resp, err := client.GetCertificateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificate", resp, "Failure sending request") - return - } - - result, err = client.GetCertificateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificate", resp, "Failure responding to request") - return - } - - return -} - -// GetCertificatePreparer prepares the GetCertificate request. -func (client BaseClient) GetCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string, certificateVersion string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - "certificate-version": autorest.Encode("path", certificateVersion), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/{certificate-version}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetCertificateSender sends the GetCertificate request. The method will close the -// http.Response Body if it receives an error. 
-func (client BaseClient) GetCertificateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetCertificateResponder handles the response to the GetCertificate request. The method always -// closes the http.Response Body. -func (client BaseClient) GetCertificateResponder(resp *http.Response) (result CertificateBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetCertificateContacts the GetCertificateContacts operation returns the set of certificate contact resources in the -// specified key vault. This operation requires the certificates/managecontacts permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -func (client BaseClient) GetCertificateContacts(ctx context.Context, vaultBaseURL string) (result Contacts, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificateContacts") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetCertificateContactsPreparer(ctx, vaultBaseURL) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateContacts", nil, "Failure preparing request") - return - } - - resp, err := client.GetCertificateContactsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateContacts", resp, "Failure sending request") - return - } - - result, err = client.GetCertificateContactsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateContacts", resp, "Failure responding to request") - return - } - - return -} - -// GetCertificateContactsPreparer prepares the GetCertificateContacts request. -func (client BaseClient) GetCertificateContactsPreparer(ctx context.Context, vaultBaseURL string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/certificates/contacts"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetCertificateContactsSender sends the GetCertificateContacts request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetCertificateContactsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetCertificateContactsResponder handles the response to the GetCertificateContacts request. The method always -// closes the http.Response Body. 
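A sketch of the GetCertificate operation completed above, assuming a configured client; passing an empty certificateVersion returns the latest version, and the ID and Cer fields on CertificateBundle are assumptions.

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault"
)

// latestCertificate fetches the newest version of a certificate by leaving the
// version segment empty, as the operation's documentation allows.
func latestCertificate(ctx context.Context, client keyvault.BaseClient, vaultURL, name string) error {
	bundle, err := client.GetCertificate(ctx, vaultURL, name, "")
	if err != nil {
		return err
	}
	if bundle.ID != nil {
		fmt.Println("certificate:", *bundle.ID)
	}
	if bundle.Cer != nil {
		fmt.Println("DER length:", len(*bundle.Cer))
	}
	return nil
}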
-func (client BaseClient) GetCertificateContactsResponder(resp *http.Response) (result Contacts, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetCertificateIssuer the GetCertificateIssuer operation returns the specified certificate issuer resources in the -// specified key vault. This operation requires the certificates/manageissuers/getissuers permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// issuerName - the name of the issuer. -func (client BaseClient) GetCertificateIssuer(ctx context.Context, vaultBaseURL string, issuerName string) (result IssuerBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificateIssuer") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetCertificateIssuerPreparer(ctx, vaultBaseURL, issuerName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateIssuer", nil, "Failure preparing request") - return - } - - resp, err := client.GetCertificateIssuerSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateIssuer", resp, "Failure sending request") - return - } - - result, err = client.GetCertificateIssuerResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateIssuer", resp, "Failure responding to request") - return - } - - return -} - -// GetCertificateIssuerPreparer prepares the GetCertificateIssuer request. -func (client BaseClient) GetCertificateIssuerPreparer(ctx context.Context, vaultBaseURL string, issuerName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "issuer-name": autorest.Encode("path", issuerName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/issuers/{issuer-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetCertificateIssuerSender sends the GetCertificateIssuer request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetCertificateIssuerSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetCertificateIssuerResponder handles the response to the GetCertificateIssuer request. The method always -// closes the http.Response Body. 
-func (client BaseClient) GetCertificateIssuerResponder(resp *http.Response) (result IssuerBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetCertificateIssuers the GetCertificateIssuers operation returns the set of certificate issuer resources in the -// specified key vault. This operation requires the certificates/manageissuers/getissuers permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// maxresults - maximum number of results to return in a page. If not specified the service will return up to -// 25 results. -func (client BaseClient) GetCertificateIssuers(ctx context.Context, vaultBaseURL string, maxresults *int32) (result CertificateIssuerListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificateIssuers") - defer func() { - sc := -1 - if result.cilr.Response.Response != nil { - sc = result.cilr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetCertificateIssuers", err.Error()) - } - - result.fn = client.getCertificateIssuersNextResults - req, err := client.GetCertificateIssuersPreparer(ctx, vaultBaseURL, maxresults) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateIssuers", nil, "Failure preparing request") - return - } - - resp, err := client.GetCertificateIssuersSender(req) - if err != nil { - result.cilr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateIssuers", resp, "Failure sending request") - return - } - - result.cilr, err = client.GetCertificateIssuersResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateIssuers", resp, "Failure responding to request") - return - } - if result.cilr.hasNextLink() && result.cilr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// GetCertificateIssuersPreparer prepares the GetCertificateIssuers request. -func (client BaseClient) GetCertificateIssuersPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/certificates/issuers"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetCertificateIssuersSender sends the GetCertificateIssuers request. 
The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetCertificateIssuersSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetCertificateIssuersResponder handles the response to the GetCertificateIssuers request. The method always -// closes the http.Response Body. -func (client BaseClient) GetCertificateIssuersResponder(resp *http.Response) (result CertificateIssuerListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getCertificateIssuersNextResults retrieves the next set of results, if any. -func (client BaseClient) getCertificateIssuersNextResults(ctx context.Context, lastResults CertificateIssuerListResult) (result CertificateIssuerListResult, err error) { - req, err := lastResults.certificateIssuerListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificateIssuersNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetCertificateIssuersSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificateIssuersNextResults", resp, "Failure sending next results request") - } - result, err = client.GetCertificateIssuersResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificateIssuersNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetCertificateIssuersComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetCertificateIssuersComplete(ctx context.Context, vaultBaseURL string, maxresults *int32) (result CertificateIssuerListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificateIssuers") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetCertificateIssuers(ctx, vaultBaseURL, maxresults) - return -} - -// GetCertificateOperation gets the creation operation associated with a specified certificate. This operation requires -// the certificates/get permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate. 
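Reviewer aside: GetCertificateIssuersComplete, removed just above, wraps the page type in an iterator that crosses page boundaries automatically. The sketch below assumes the conventional autorest iterator surface (NotDone, Value, NextWithContext) and an issuer item with an ID field; that surface is implied by, but not shown verbatim in, this hunk.

package kvexamples // hypothetical

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault" // assumed import path
)

// listAllIssuers walks every certificate issuer in the vault, letting the
// generated iterator fetch further pages as needed.
func listAllIssuers(ctx context.Context, client keyvault.BaseClient, vaultURL string) error {
	it, err := client.GetCertificateIssuersComplete(ctx, vaultURL, nil) // nil maxresults: service default of up to 25 per page
	if err != nil {
		return err
	}
	for it.NotDone() {
		issuer := it.Value() // issuer item; ID field assumed
		if issuer.ID != nil {
			fmt.Println(*issuer.ID)
		}
		if err := it.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}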
-func (client BaseClient) GetCertificateOperation(ctx context.Context, vaultBaseURL string, certificateName string) (result CertificateOperation, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificateOperation") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetCertificateOperationPreparer(ctx, vaultBaseURL, certificateName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateOperation", nil, "Failure preparing request") - return - } - - resp, err := client.GetCertificateOperationSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateOperation", resp, "Failure sending request") - return - } - - result, err = client.GetCertificateOperationResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateOperation", resp, "Failure responding to request") - return - } - - return -} - -// GetCertificateOperationPreparer prepares the GetCertificateOperation request. -func (client BaseClient) GetCertificateOperationPreparer(ctx context.Context, vaultBaseURL string, certificateName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/pending", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetCertificateOperationSender sends the GetCertificateOperation request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetCertificateOperationSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetCertificateOperationResponder handles the response to the GetCertificateOperation request. The method always -// closes the http.Response Body. -func (client BaseClient) GetCertificateOperationResponder(resp *http.Response) (result CertificateOperation, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetCertificatePolicy the GetCertificatePolicy operation returns the specified certificate policy resources in the -// specified key vault. This operation requires the certificates/get permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate in a given key vault. 
-func (client BaseClient) GetCertificatePolicy(ctx context.Context, vaultBaseURL string, certificateName string) (result CertificatePolicy, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificatePolicy") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetCertificatePolicyPreparer(ctx, vaultBaseURL, certificateName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificatePolicy", nil, "Failure preparing request") - return - } - - resp, err := client.GetCertificatePolicySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificatePolicy", resp, "Failure sending request") - return - } - - result, err = client.GetCertificatePolicyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificatePolicy", resp, "Failure responding to request") - return - } - - return -} - -// GetCertificatePolicyPreparer prepares the GetCertificatePolicy request. -func (client BaseClient) GetCertificatePolicyPreparer(ctx context.Context, vaultBaseURL string, certificateName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/policy", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetCertificatePolicySender sends the GetCertificatePolicy request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetCertificatePolicySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetCertificatePolicyResponder handles the response to the GetCertificatePolicy request. The method always -// closes the http.Response Body. -func (client BaseClient) GetCertificatePolicyResponder(resp *http.Response) (result CertificatePolicy, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetCertificates the GetCertificates operation returns the set of certificates resources in the specified key vault. -// This operation requires the certificates/list permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// maxresults - maximum number of results to return in a page. If not specified the service will return up to -// 25 results. -// includePending - specifies whether to include certificates which are not completely provisioned. 
-func (client BaseClient) GetCertificates(ctx context.Context, vaultBaseURL string, maxresults *int32, includePending *bool) (result CertificateListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificates") - defer func() { - sc := -1 - if result.clr.Response.Response != nil { - sc = result.clr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetCertificates", err.Error()) - } - - result.fn = client.getCertificatesNextResults - req, err := client.GetCertificatesPreparer(ctx, vaultBaseURL, maxresults, includePending) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificates", nil, "Failure preparing request") - return - } - - resp, err := client.GetCertificatesSender(req) - if err != nil { - result.clr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificates", resp, "Failure sending request") - return - } - - result.clr, err = client.GetCertificatesResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificates", resp, "Failure responding to request") - return - } - if result.clr.hasNextLink() && result.clr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// GetCertificatesPreparer prepares the GetCertificates request. -func (client BaseClient) GetCertificatesPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32, includePending *bool) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - if includePending != nil { - queryParameters["includePending"] = autorest.Encode("query", *includePending) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/certificates"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetCertificatesSender sends the GetCertificates request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetCertificatesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetCertificatesResponder handles the response to the GetCertificates request. The method always -// closes the http.Response Body. 
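Reviewer aside: GetCertificates, removed above, validates maxresults against the 1..25 range client-side before any request is issued and returns a page type rather than an iterator. The sketch below drives the page manually; the NotDone, Values, and NextWithContext page methods follow the usual autorest convention and are assumptions, as is the certificate item ID field.

package kvexamples // hypothetical

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault" // assumed import path
)

// listCertificatesPaged walks the certificate list one page at a time.
// A pageSize outside 1..25 would be rejected locally by the validation shown
// in GetCertificates above, before any HTTP request is sent.
func listCertificatesPaged(ctx context.Context, client keyvault.BaseClient, vaultURL string) error {
	pageSize := int32(25)
	includePending := false
	page, err := client.GetCertificates(ctx, vaultURL, &pageSize, &includePending)
	if err != nil {
		return err
	}
	for page.NotDone() {
		for _, item := range page.Values() { // certificate item; ID field assumed
			if item.ID != nil {
				fmt.Println(*item.ID)
			}
		}
		if err := page.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}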
-func (client BaseClient) GetCertificatesResponder(resp *http.Response) (result CertificateListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getCertificatesNextResults retrieves the next set of results, if any. -func (client BaseClient) getCertificatesNextResults(ctx context.Context, lastResults CertificateListResult) (result CertificateListResult, err error) { - req, err := lastResults.certificateListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificatesNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetCertificatesSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificatesNextResults", resp, "Failure sending next results request") - } - result, err = client.GetCertificatesResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificatesNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetCertificatesComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetCertificatesComplete(ctx context.Context, vaultBaseURL string, maxresults *int32, includePending *bool) (result CertificateListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificates") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetCertificates(ctx, vaultBaseURL, maxresults, includePending) - return -} - -// GetCertificateVersions the GetCertificateVersions operation returns the versions of a certificate in the specified -// key vault. This operation requires the certificates/list permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate. -// maxresults - maximum number of results to return in a page. If not specified the service will return up to -// 25 results. 
-func (client BaseClient) GetCertificateVersions(ctx context.Context, vaultBaseURL string, certificateName string, maxresults *int32) (result CertificateListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificateVersions") - defer func() { - sc := -1 - if result.clr.Response.Response != nil { - sc = result.clr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetCertificateVersions", err.Error()) - } - - result.fn = client.getCertificateVersionsNextResults - req, err := client.GetCertificateVersionsPreparer(ctx, vaultBaseURL, certificateName, maxresults) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateVersions", nil, "Failure preparing request") - return - } - - resp, err := client.GetCertificateVersionsSender(req) - if err != nil { - result.clr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateVersions", resp, "Failure sending request") - return - } - - result.clr, err = client.GetCertificateVersionsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateVersions", resp, "Failure responding to request") - return - } - if result.clr.hasNextLink() && result.clr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// GetCertificateVersionsPreparer prepares the GetCertificateVersions request. -func (client BaseClient) GetCertificateVersionsPreparer(ctx context.Context, vaultBaseURL string, certificateName string, maxresults *int32) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/versions", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetCertificateVersionsSender sends the GetCertificateVersions request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetCertificateVersionsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetCertificateVersionsResponder handles the response to the GetCertificateVersions request. The method always -// closes the http.Response Body. 
-func (client BaseClient) GetCertificateVersionsResponder(resp *http.Response) (result CertificateListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getCertificateVersionsNextResults retrieves the next set of results, if any. -func (client BaseClient) getCertificateVersionsNextResults(ctx context.Context, lastResults CertificateListResult) (result CertificateListResult, err error) { - req, err := lastResults.certificateListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificateVersionsNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetCertificateVersionsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificateVersionsNextResults", resp, "Failure sending next results request") - } - result, err = client.GetCertificateVersionsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificateVersionsNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetCertificateVersionsComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetCertificateVersionsComplete(ctx context.Context, vaultBaseURL string, certificateName string, maxresults *int32) (result CertificateListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificateVersions") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetCertificateVersions(ctx, vaultBaseURL, certificateName, maxresults) - return -} - -// GetDeletedCertificate the GetDeletedCertificate operation retrieves the deleted certificate information plus its -// attributes, such as retention interval, scheduled permanent deletion and the current deletion recovery level. This -// operation requires the certificates/get permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. 
-// certificateName - the name of the certificate -func (client BaseClient) GetDeletedCertificate(ctx context.Context, vaultBaseURL string, certificateName string) (result DeletedCertificateBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedCertificate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetDeletedCertificatePreparer(ctx, vaultBaseURL, certificateName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedCertificate", nil, "Failure preparing request") - return - } - - resp, err := client.GetDeletedCertificateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedCertificate", resp, "Failure sending request") - return - } - - result, err = client.GetDeletedCertificateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedCertificate", resp, "Failure responding to request") - return - } - - return -} - -// GetDeletedCertificatePreparer prepares the GetDeletedCertificate request. -func (client BaseClient) GetDeletedCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedcertificates/{certificate-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetDeletedCertificateSender sends the GetDeletedCertificate request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetDeletedCertificateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetDeletedCertificateResponder handles the response to the GetDeletedCertificate request. The method always -// closes the http.Response Body. -func (client BaseClient) GetDeletedCertificateResponder(resp *http.Response) (result DeletedCertificateBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetDeletedCertificates the GetDeletedCertificates operation retrieves the certificates in the current vault which -// are in a deleted state and ready for recovery or purging. This operation includes deletion-specific information. -// This operation requires the certificates/get/list permission. This operation can only be enabled on soft-delete -// enabled vaults. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// maxresults - maximum number of results to return in a page. If not specified the service will return up to -// 25 results. 
-// includePending - specifies whether to include certificates which are not completely provisioned. -func (client BaseClient) GetDeletedCertificates(ctx context.Context, vaultBaseURL string, maxresults *int32, includePending *bool) (result DeletedCertificateListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedCertificates") - defer func() { - sc := -1 - if result.dclr.Response.Response != nil { - sc = result.dclr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetDeletedCertificates", err.Error()) - } - - result.fn = client.getDeletedCertificatesNextResults - req, err := client.GetDeletedCertificatesPreparer(ctx, vaultBaseURL, maxresults, includePending) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedCertificates", nil, "Failure preparing request") - return - } - - resp, err := client.GetDeletedCertificatesSender(req) - if err != nil { - result.dclr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedCertificates", resp, "Failure sending request") - return - } - - result.dclr, err = client.GetDeletedCertificatesResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedCertificates", resp, "Failure responding to request") - return - } - if result.dclr.hasNextLink() && result.dclr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// GetDeletedCertificatesPreparer prepares the GetDeletedCertificates request. -func (client BaseClient) GetDeletedCertificatesPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32, includePending *bool) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - if includePending != nil { - queryParameters["includePending"] = autorest.Encode("query", *includePending) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/deletedcertificates"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetDeletedCertificatesSender sends the GetDeletedCertificates request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetDeletedCertificatesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetDeletedCertificatesResponder handles the response to the GetDeletedCertificates request. The method always -// closes the http.Response Body. 
-func (client BaseClient) GetDeletedCertificatesResponder(resp *http.Response) (result DeletedCertificateListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getDeletedCertificatesNextResults retrieves the next set of results, if any. -func (client BaseClient) getDeletedCertificatesNextResults(ctx context.Context, lastResults DeletedCertificateListResult) (result DeletedCertificateListResult, err error) { - req, err := lastResults.deletedCertificateListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedCertificatesNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetDeletedCertificatesSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedCertificatesNextResults", resp, "Failure sending next results request") - } - result, err = client.GetDeletedCertificatesResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedCertificatesNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetDeletedCertificatesComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetDeletedCertificatesComplete(ctx context.Context, vaultBaseURL string, maxresults *int32, includePending *bool) (result DeletedCertificateListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedCertificates") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetDeletedCertificates(ctx, vaultBaseURL, maxresults, includePending) - return -} - -// GetDeletedKey the Get Deleted Key operation is applicable for soft-delete enabled vaults. While the operation can be -// invoked on any vault, it will return an error if invoked on a non soft-delete enabled vault. This operation requires -// the keys/get permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key. 
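Reviewer aside: GetDeletedKey, whose generated body follows immediately below, only succeeds against soft-delete enabled vaults; on any other vault the service returns an error rather than a bundle. A minimal sketch under the same assumptions as the earlier ones; the DeletedKeyBundle field name is likewise assumed.

package kvexamples // hypothetical

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault" // assumed import path
)

// showDeletedKey fetches the public part and deletion metadata of a deleted key.
// On a vault without soft-delete the call returns a service error.
func showDeletedKey(ctx context.Context, client keyvault.BaseClient, vaultURL, keyName string) error {
	bundle, err := client.GetDeletedKey(ctx, vaultURL, keyName)
	if err != nil {
		return err
	}
	if bundle.RecoveryID != nil { // field name assumed from the generated DeletedKeyBundle model
		fmt.Println("recovery id:", *bundle.RecoveryID)
	}
	return nil
}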
-func (client BaseClient) GetDeletedKey(ctx context.Context, vaultBaseURL string, keyName string) (result DeletedKeyBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetDeletedKeyPreparer(ctx, vaultBaseURL, keyName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedKey", nil, "Failure preparing request") - return - } - - resp, err := client.GetDeletedKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedKey", resp, "Failure sending request") - return - } - - result, err = client.GetDeletedKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedKey", resp, "Failure responding to request") - return - } - - return -} - -// GetDeletedKeyPreparer prepares the GetDeletedKey request. -func (client BaseClient) GetDeletedKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedkeys/{key-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetDeletedKeySender sends the GetDeletedKey request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetDeletedKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetDeletedKeyResponder handles the response to the GetDeletedKey request. The method always -// closes the http.Response Body. -func (client BaseClient) GetDeletedKeyResponder(resp *http.Response) (result DeletedKeyBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetDeletedKeys retrieves a list of the keys in the Key Vault as JSON Web Key structures that contain the public part -// of a deleted key. This operation includes deletion-specific information. The Get Deleted Keys operation is -// applicable for vaults enabled for soft-delete. While the operation can be invoked on any vault, it will return an -// error if invoked on a non soft-delete enabled vault. This operation requires the keys/list permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// maxresults - maximum number of results to return in a page. If not specified the service will return up to -// 25 results. 
-func (client BaseClient) GetDeletedKeys(ctx context.Context, vaultBaseURL string, maxresults *int32) (result DeletedKeyListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedKeys") - defer func() { - sc := -1 - if result.dklr.Response.Response != nil { - sc = result.dklr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetDeletedKeys", err.Error()) - } - - result.fn = client.getDeletedKeysNextResults - req, err := client.GetDeletedKeysPreparer(ctx, vaultBaseURL, maxresults) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedKeys", nil, "Failure preparing request") - return - } - - resp, err := client.GetDeletedKeysSender(req) - if err != nil { - result.dklr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedKeys", resp, "Failure sending request") - return - } - - result.dklr, err = client.GetDeletedKeysResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedKeys", resp, "Failure responding to request") - return - } - if result.dklr.hasNextLink() && result.dklr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// GetDeletedKeysPreparer prepares the GetDeletedKeys request. -func (client BaseClient) GetDeletedKeysPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/deletedkeys"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetDeletedKeysSender sends the GetDeletedKeys request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetDeletedKeysSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetDeletedKeysResponder handles the response to the GetDeletedKeys request. The method always -// closes the http.Response Body. -func (client BaseClient) GetDeletedKeysResponder(resp *http.Response) (result DeletedKeyListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getDeletedKeysNextResults retrieves the next set of results, if any. 
-func (client BaseClient) getDeletedKeysNextResults(ctx context.Context, lastResults DeletedKeyListResult) (result DeletedKeyListResult, err error) { - req, err := lastResults.deletedKeyListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedKeysNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetDeletedKeysSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedKeysNextResults", resp, "Failure sending next results request") - } - result, err = client.GetDeletedKeysResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedKeysNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetDeletedKeysComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetDeletedKeysComplete(ctx context.Context, vaultBaseURL string, maxresults *int32) (result DeletedKeyListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedKeys") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetDeletedKeys(ctx, vaultBaseURL, maxresults) - return -} - -// GetDeletedSasDefinition the Get Deleted SAS Definition operation returns the specified deleted SAS definition along -// with its attributes. This operation requires the storage/getsas permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. -// sasDefinitionName - the name of the SAS definition. 
-func (client BaseClient) GetDeletedSasDefinition(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (result DeletedSasDefinitionBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedSasDefinition") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, - {TargetValue: sasDefinitionName, - Constraints: []validation.Constraint{{Target: "sasDefinitionName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetDeletedSasDefinition", err.Error()) - } - - req, err := client.GetDeletedSasDefinitionPreparer(ctx, vaultBaseURL, storageAccountName, sasDefinitionName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSasDefinition", nil, "Failure preparing request") - return - } - - resp, err := client.GetDeletedSasDefinitionSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSasDefinition", resp, "Failure sending request") - return - } - - result, err = client.GetDeletedSasDefinitionResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSasDefinition", resp, "Failure responding to request") - return - } - - return -} - -// GetDeletedSasDefinitionPreparer prepares the GetDeletedSasDefinition request. -func (client BaseClient) GetDeletedSasDefinitionPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "sas-definition-name": autorest.Encode("path", sasDefinitionName), - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedstorage/{storage-account-name}/sas/{sas-definition-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetDeletedSasDefinitionSender sends the GetDeletedSasDefinition request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetDeletedSasDefinitionSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetDeletedSasDefinitionResponder handles the response to the GetDeletedSasDefinition request. The method always -// closes the http.Response Body. 
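Reviewer aside: GetDeletedSasDefinition, removed above, rejects storage account and SAS definition names that do not match ^[0-9a-zA-Z]+$ before any request is sent, so a hyphenated name fails locally with a validation error. Sketch under the same assumptions as the earlier ones; only the signature and the name patterns come from this hunk.

package kvexamples // hypothetical

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault" // assumed import path
)

// getDeletedSas illustrates the client-side name validation: "my-account"
// contains a hyphen, so the first call fails locally; the second uses a
// conforming name and actually reaches the service.
func getDeletedSas(ctx context.Context, client keyvault.BaseClient, vaultURL string) {
	if _, err := client.GetDeletedSasDefinition(ctx, vaultURL, "my-account", "readsas"); err != nil {
		fmt.Println("rejected before sending:", err) // validation error from the pattern check above
	}
	bundle, err := client.GetDeletedSasDefinition(ctx, vaultURL, "storacct01", "readsas")
	if err != nil {
		fmt.Println("service call failed:", err)
		return
	}
	if bundle.ID != nil { // field name assumed from DeletedSasDefinitionBundle
		fmt.Println("deleted SAS definition:", *bundle.ID)
	}
}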
-func (client BaseClient) GetDeletedSasDefinitionResponder(resp *http.Response) (result DeletedSasDefinitionBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetDeletedSasDefinitions the Get Deleted Sas Definitions operation returns the SAS definitions that have been -// deleted for a vault enabled for soft-delete. This operation requires the storage/listsas permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. -// maxresults - maximum number of results to return in a page. If not specified the service will return up to -// 25 results. -func (client BaseClient) GetDeletedSasDefinitions(ctx context.Context, vaultBaseURL string, storageAccountName string, maxresults *int32) (result DeletedSasDefinitionListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedSasDefinitions") - defer func() { - sc := -1 - if result.dsdlr.Response.Response != nil { - sc = result.dsdlr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetDeletedSasDefinitions", err.Error()) - } - - result.fn = client.getDeletedSasDefinitionsNextResults - req, err := client.GetDeletedSasDefinitionsPreparer(ctx, vaultBaseURL, storageAccountName, maxresults) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSasDefinitions", nil, "Failure preparing request") - return - } - - resp, err := client.GetDeletedSasDefinitionsSender(req) - if err != nil { - result.dsdlr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSasDefinitions", resp, "Failure sending request") - return - } - - result.dsdlr, err = client.GetDeletedSasDefinitionsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSasDefinitions", resp, "Failure responding to request") - return - } - if result.dsdlr.hasNextLink() && result.dsdlr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// GetDeletedSasDefinitionsPreparer prepares the GetDeletedSasDefinitions request. 
-func (client BaseClient) GetDeletedSasDefinitionsPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, maxresults *int32) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedstorage/{storage-account-name}/sas", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetDeletedSasDefinitionsSender sends the GetDeletedSasDefinitions request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetDeletedSasDefinitionsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetDeletedSasDefinitionsResponder handles the response to the GetDeletedSasDefinitions request. The method always -// closes the http.Response Body. -func (client BaseClient) GetDeletedSasDefinitionsResponder(resp *http.Response) (result DeletedSasDefinitionListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getDeletedSasDefinitionsNextResults retrieves the next set of results, if any. -func (client BaseClient) getDeletedSasDefinitionsNextResults(ctx context.Context, lastResults DeletedSasDefinitionListResult) (result DeletedSasDefinitionListResult, err error) { - req, err := lastResults.deletedSasDefinitionListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedSasDefinitionsNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetDeletedSasDefinitionsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedSasDefinitionsNextResults", resp, "Failure sending next results request") - } - result, err = client.GetDeletedSasDefinitionsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedSasDefinitionsNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetDeletedSasDefinitionsComplete enumerates all values, automatically crossing page boundaries as required. 
-func (client BaseClient) GetDeletedSasDefinitionsComplete(ctx context.Context, vaultBaseURL string, storageAccountName string, maxresults *int32) (result DeletedSasDefinitionListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedSasDefinitions") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetDeletedSasDefinitions(ctx, vaultBaseURL, storageAccountName, maxresults) - return -} - -// GetDeletedSecret the Get Deleted Secret operation returns the specified deleted secret along with its attributes. -// This operation requires the secrets/get permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// secretName - the name of the secret. -func (client BaseClient) GetDeletedSecret(ctx context.Context, vaultBaseURL string, secretName string) (result DeletedSecretBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedSecret") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetDeletedSecretPreparer(ctx, vaultBaseURL, secretName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSecret", nil, "Failure preparing request") - return - } - - resp, err := client.GetDeletedSecretSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSecret", resp, "Failure sending request") - return - } - - result, err = client.GetDeletedSecretResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSecret", resp, "Failure responding to request") - return - } - - return -} - -// GetDeletedSecretPreparer prepares the GetDeletedSecret request. -func (client BaseClient) GetDeletedSecretPreparer(ctx context.Context, vaultBaseURL string, secretName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "secret-name": autorest.Encode("path", secretName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedsecrets/{secret-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetDeletedSecretSender sends the GetDeletedSecret request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetDeletedSecretSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetDeletedSecretResponder handles the response to the GetDeletedSecret request. The method always -// closes the http.Response Body. 
-func (client BaseClient) GetDeletedSecretResponder(resp *http.Response) (result DeletedSecretBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetDeletedSecrets the Get Deleted Secrets operation returns the secrets that have been deleted for a vault enabled -// for soft-delete. This operation requires the secrets/list permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// maxresults - maximum number of results to return in a page. If not specified the service will return up to -// 25 results. -func (client BaseClient) GetDeletedSecrets(ctx context.Context, vaultBaseURL string, maxresults *int32) (result DeletedSecretListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedSecrets") - defer func() { - sc := -1 - if result.dslr.Response.Response != nil { - sc = result.dslr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetDeletedSecrets", err.Error()) - } - - result.fn = client.getDeletedSecretsNextResults - req, err := client.GetDeletedSecretsPreparer(ctx, vaultBaseURL, maxresults) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSecrets", nil, "Failure preparing request") - return - } - - resp, err := client.GetDeletedSecretsSender(req) - if err != nil { - result.dslr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSecrets", resp, "Failure sending request") - return - } - - result.dslr, err = client.GetDeletedSecretsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSecrets", resp, "Failure responding to request") - return - } - if result.dslr.hasNextLink() && result.dslr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// GetDeletedSecretsPreparer prepares the GetDeletedSecrets request. -func (client BaseClient) GetDeletedSecretsPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/deletedsecrets"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetDeletedSecretsSender sends the GetDeletedSecrets request. The method will close the -// http.Response Body if it receives an error. 
-func (client BaseClient) GetDeletedSecretsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetDeletedSecretsResponder handles the response to the GetDeletedSecrets request. The method always -// closes the http.Response Body. -func (client BaseClient) GetDeletedSecretsResponder(resp *http.Response) (result DeletedSecretListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getDeletedSecretsNextResults retrieves the next set of results, if any. -func (client BaseClient) getDeletedSecretsNextResults(ctx context.Context, lastResults DeletedSecretListResult) (result DeletedSecretListResult, err error) { - req, err := lastResults.deletedSecretListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedSecretsNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetDeletedSecretsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedSecretsNextResults", resp, "Failure sending next results request") - } - result, err = client.GetDeletedSecretsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedSecretsNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetDeletedSecretsComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetDeletedSecretsComplete(ctx context.Context, vaultBaseURL string, maxresults *int32) (result DeletedSecretListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedSecrets") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetDeletedSecrets(ctx, vaultBaseURL, maxresults) - return -} - -// GetDeletedStorageAccount the Get Deleted Storage Account operation returns the specified deleted storage account -// along with its attributes. This operation requires the storage/get permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. 
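A short sketch combining the two deleted-secret operations above: GetDeletedSecret for a single soft-deleted secret and GetDeletedSecretsComplete for the full enumeration. The helper name and import path are assumed for illustration; the method signatures are the ones declared in this hunk.

package kvexample

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault" // assumed vendor path
)

// inspectDeletedSecrets fetches one soft-deleted secret by name, then counts
// every deleted secret in the vault via the paging iterator.
func inspectDeletedSecrets(ctx context.Context, client keyvault.BaseClient, vaultURL, secretName string) error {
	if _, err := client.GetDeletedSecret(ctx, vaultURL, secretName); err != nil {
		return fmt.Errorf("get deleted secret %q: %w", secretName, err)
	}

	iter, err := client.GetDeletedSecretsComplete(ctx, vaultURL, nil)
	if err != nil {
		return err
	}
	total := 0
	for iter.NotDone() {
		total++
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	fmt.Printf("%s contains %d deleted secrets\n", vaultURL, total)
	return nil
}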
-func (client BaseClient) GetDeletedStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string) (result DeletedStorageBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedStorageAccount") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetDeletedStorageAccount", err.Error()) - } - - req, err := client.GetDeletedStorageAccountPreparer(ctx, vaultBaseURL, storageAccountName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedStorageAccount", nil, "Failure preparing request") - return - } - - resp, err := client.GetDeletedStorageAccountSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedStorageAccount", resp, "Failure sending request") - return - } - - result, err = client.GetDeletedStorageAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedStorageAccount", resp, "Failure responding to request") - return - } - - return -} - -// GetDeletedStorageAccountPreparer prepares the GetDeletedStorageAccount request. -func (client BaseClient) GetDeletedStorageAccountPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedstorage/{storage-account-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetDeletedStorageAccountSender sends the GetDeletedStorageAccount request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetDeletedStorageAccountSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetDeletedStorageAccountResponder handles the response to the GetDeletedStorageAccount request. The method always -// closes the http.Response Body. -func (client BaseClient) GetDeletedStorageAccountResponder(resp *http.Response) (result DeletedStorageBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetDeletedStorageAccounts the Get Deleted Storage Accounts operation returns the storage accounts that have been -// deleted for a vault enabled for soft-delete. This operation requires the storage/list permission. 
-// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// maxresults - maximum number of results to return in a page. If not specified the service will return up to -// 25 results. -func (client BaseClient) GetDeletedStorageAccounts(ctx context.Context, vaultBaseURL string, maxresults *int32) (result DeletedStorageListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedStorageAccounts") - defer func() { - sc := -1 - if result.dslr.Response.Response != nil { - sc = result.dslr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetDeletedStorageAccounts", err.Error()) - } - - result.fn = client.getDeletedStorageAccountsNextResults - req, err := client.GetDeletedStorageAccountsPreparer(ctx, vaultBaseURL, maxresults) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedStorageAccounts", nil, "Failure preparing request") - return - } - - resp, err := client.GetDeletedStorageAccountsSender(req) - if err != nil { - result.dslr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedStorageAccounts", resp, "Failure sending request") - return - } - - result.dslr, err = client.GetDeletedStorageAccountsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedStorageAccounts", resp, "Failure responding to request") - return - } - if result.dslr.hasNextLink() && result.dslr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// GetDeletedStorageAccountsPreparer prepares the GetDeletedStorageAccounts request. -func (client BaseClient) GetDeletedStorageAccountsPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/deletedstorage"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetDeletedStorageAccountsSender sends the GetDeletedStorageAccounts request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetDeletedStorageAccountsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetDeletedStorageAccountsResponder handles the response to the GetDeletedStorageAccounts request. The method always -// closes the http.Response Body. 
-func (client BaseClient) GetDeletedStorageAccountsResponder(resp *http.Response) (result DeletedStorageListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getDeletedStorageAccountsNextResults retrieves the next set of results, if any. -func (client BaseClient) getDeletedStorageAccountsNextResults(ctx context.Context, lastResults DeletedStorageListResult) (result DeletedStorageListResult, err error) { - req, err := lastResults.deletedStorageListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedStorageAccountsNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetDeletedStorageAccountsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedStorageAccountsNextResults", resp, "Failure sending next results request") - } - result, err = client.GetDeletedStorageAccountsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedStorageAccountsNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetDeletedStorageAccountsComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetDeletedStorageAccountsComplete(ctx context.Context, vaultBaseURL string, maxresults *int32) (result DeletedStorageListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedStorageAccounts") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetDeletedStorageAccounts(ctx, vaultBaseURL, maxresults) - return -} - -// GetKey the get key operation is applicable to all key types. If the requested key is symmetric, then no key material -// is released in the response. This operation requires the keys/get permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key to get. -// keyVersion - adding the version parameter retrieves a specific version of a key. This URI fragment is -// optional. If not specified, the latest version of the key is returned. 
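The deleted-storage enumeration follows the same iterator pattern. A minimal, assumed helper that only checks whether anything is currently soft-deleted:

package kvexample

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault" // assumed vendor path
)

// hasDeletedStorageAccounts reports whether the vault holds any soft-deleted
// managed storage accounts, stopping at the first item the iterator yields.
func hasDeletedStorageAccounts(ctx context.Context, client keyvault.BaseClient, vaultURL string) (bool, error) {
	iter, err := client.GetDeletedStorageAccountsComplete(ctx, vaultURL, nil)
	if err != nil {
		return false, err
	}
	return iter.NotDone(), nil
}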
-func (client BaseClient) GetKey(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string) (result KeyBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetKeyPreparer(ctx, vaultBaseURL, keyName, keyVersion) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKey", nil, "Failure preparing request") - return - } - - resp, err := client.GetKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKey", resp, "Failure sending request") - return - } - - result, err = client.GetKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKey", resp, "Failure responding to request") - return - } - - return -} - -// GetKeyPreparer prepares the GetKey request. -func (client BaseClient) GetKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - "key-version": autorest.Encode("path", keyVersion), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}/{key-version}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetKeySender sends the GetKey request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetKeyResponder handles the response to the GetKey request. The method always -// closes the http.Response Body. -func (client BaseClient) GetKeyResponder(resp *http.Response) (result KeyBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetKeys retrieves a list of the keys in the Key Vault as JSON Web Key structures that contain the public part of a -// stored key. The LIST operation is applicable to all key types, however only the base key identifier, attributes, and -// tags are provided in the response. Individual versions of a key are not listed in the response. This operation -// requires the keys/list permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// maxresults - maximum number of results to return in a page. If not specified the service will return up to -// 25 results. 
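A hedged usage sketch for GetKey as declared above. An empty keyVersion argument selects the latest version, per the doc comment; the Key and Kid fields referenced below are the usual generated KeyBundle/JSONWebKey model fields, and the helper itself is illustrative.

package kvexample

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault" // assumed vendor path
)

// latestKeyID returns the full key identifier of the latest version of a key.
func latestKeyID(ctx context.Context, client keyvault.BaseClient, vaultURL, keyName string) (string, error) {
	bundle, err := client.GetKey(ctx, vaultURL, keyName, "")
	if err != nil {
		return "", err
	}
	// Kid on the JSON Web Key is a *string in the generated models.
	if bundle.Key == nil || bundle.Key.Kid == nil {
		return "", fmt.Errorf("key %q returned no key identifier", keyName)
	}
	return *bundle.Key.Kid, nil
}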
-func (client BaseClient) GetKeys(ctx context.Context, vaultBaseURL string, maxresults *int32) (result KeyListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetKeys") - defer func() { - sc := -1 - if result.klr.Response.Response != nil { - sc = result.klr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetKeys", err.Error()) - } - - result.fn = client.getKeysNextResults - req, err := client.GetKeysPreparer(ctx, vaultBaseURL, maxresults) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKeys", nil, "Failure preparing request") - return - } - - resp, err := client.GetKeysSender(req) - if err != nil { - result.klr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKeys", resp, "Failure sending request") - return - } - - result.klr, err = client.GetKeysResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKeys", resp, "Failure responding to request") - return - } - if result.klr.hasNextLink() && result.klr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// GetKeysPreparer prepares the GetKeys request. -func (client BaseClient) GetKeysPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/keys"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetKeysSender sends the GetKeys request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetKeysSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetKeysResponder handles the response to the GetKeys request. The method always -// closes the http.Response Body. -func (client BaseClient) GetKeysResponder(resp *http.Response) (result KeyListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getKeysNextResults retrieves the next set of results, if any. 
-func (client BaseClient) getKeysNextResults(ctx context.Context, lastResults KeyListResult) (result KeyListResult, err error) { - req, err := lastResults.keyListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getKeysNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetKeysSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getKeysNextResults", resp, "Failure sending next results request") - } - result, err = client.GetKeysResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getKeysNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetKeysComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetKeysComplete(ctx context.Context, vaultBaseURL string, maxresults *int32) (result KeyListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetKeys") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetKeys(ctx, vaultBaseURL, maxresults) - return -} - -// GetKeyVersions the full key identifier, attributes, and tags are provided in the response. This operation requires -// the keys/list permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key. -// maxresults - maximum number of results to return in a page. If not specified the service will return up to -// 25 results. 
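GetKeysComplete wraps the paged GetKeys call shown above. A small assumed helper that gathers the base key identifiers (KeyItem.Kid in the generated models); as the doc comment notes, the LIST operation never returns key material or individual versions.

package kvexample

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault" // assumed vendor path
)

// listKeyIDs collects the base key identifier of every key in the vault.
func listKeyIDs(ctx context.Context, client keyvault.BaseClient, vaultURL string) ([]string, error) {
	iter, err := client.GetKeysComplete(ctx, vaultURL, nil)
	if err != nil {
		return nil, err
	}
	var ids []string
	for iter.NotDone() {
		if kid := iter.Value().Kid; kid != nil { // KeyItem.Kid is a *string
			ids = append(ids, *kid)
		}
		if err := iter.NextWithContext(ctx); err != nil {
			return ids, err
		}
	}
	return ids, nil
}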
-func (client BaseClient) GetKeyVersions(ctx context.Context, vaultBaseURL string, keyName string, maxresults *int32) (result KeyListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetKeyVersions") - defer func() { - sc := -1 - if result.klr.Response.Response != nil { - sc = result.klr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetKeyVersions", err.Error()) - } - - result.fn = client.getKeyVersionsNextResults - req, err := client.GetKeyVersionsPreparer(ctx, vaultBaseURL, keyName, maxresults) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKeyVersions", nil, "Failure preparing request") - return - } - - resp, err := client.GetKeyVersionsSender(req) - if err != nil { - result.klr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKeyVersions", resp, "Failure sending request") - return - } - - result.klr, err = client.GetKeyVersionsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKeyVersions", resp, "Failure responding to request") - return - } - if result.klr.hasNextLink() && result.klr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// GetKeyVersionsPreparer prepares the GetKeyVersions request. -func (client BaseClient) GetKeyVersionsPreparer(ctx context.Context, vaultBaseURL string, keyName string, maxresults *int32) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}/versions", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetKeyVersionsSender sends the GetKeyVersions request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetKeyVersionsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetKeyVersionsResponder handles the response to the GetKeyVersions request. The method always -// closes the http.Response Body. 
-func (client BaseClient) GetKeyVersionsResponder(resp *http.Response) (result KeyListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getKeyVersionsNextResults retrieves the next set of results, if any. -func (client BaseClient) getKeyVersionsNextResults(ctx context.Context, lastResults KeyListResult) (result KeyListResult, err error) { - req, err := lastResults.keyListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getKeyVersionsNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetKeyVersionsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getKeyVersionsNextResults", resp, "Failure sending next results request") - } - result, err = client.GetKeyVersionsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getKeyVersionsNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetKeyVersionsComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetKeyVersionsComplete(ctx context.Context, vaultBaseURL string, keyName string, maxresults *int32) (result KeyListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetKeyVersions") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetKeyVersions(ctx, vaultBaseURL, keyName, maxresults) - return -} - -// GetSasDefinition gets information about a SAS definition for the specified storage account. This operation requires -// the storage/getsas permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. -// sasDefinitionName - the name of the SAS definition. 
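The same pattern applies to key versions. This assumed helper also shows passing an explicit maxresults pointer, which the validation block above caps at 25:

package kvexample

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault" // assumed vendor path
)

// listKeyVersionIDs returns the versioned key identifiers for one key.
func listKeyVersionIDs(ctx context.Context, client keyvault.BaseClient, vaultURL, keyName string) ([]string, error) {
	pageSize := int32(25)
	iter, err := client.GetKeyVersionsComplete(ctx, vaultURL, keyName, &pageSize)
	if err != nil {
		return nil, err
	}
	var ids []string
	for iter.NotDone() {
		if kid := iter.Value().Kid; kid != nil {
			ids = append(ids, *kid)
		}
		if err := iter.NextWithContext(ctx); err != nil {
			return ids, err
		}
	}
	return ids, nil
}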
-func (client BaseClient) GetSasDefinition(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (result SasDefinitionBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetSasDefinition") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, - {TargetValue: sasDefinitionName, - Constraints: []validation.Constraint{{Target: "sasDefinitionName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetSasDefinition", err.Error()) - } - - req, err := client.GetSasDefinitionPreparer(ctx, vaultBaseURL, storageAccountName, sasDefinitionName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSasDefinition", nil, "Failure preparing request") - return - } - - resp, err := client.GetSasDefinitionSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSasDefinition", resp, "Failure sending request") - return - } - - result, err = client.GetSasDefinitionResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSasDefinition", resp, "Failure responding to request") - return - } - - return -} - -// GetSasDefinitionPreparer prepares the GetSasDefinition request. -func (client BaseClient) GetSasDefinitionPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "sas-definition-name": autorest.Encode("path", sasDefinitionName), - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/storage/{storage-account-name}/sas/{sas-definition-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSasDefinitionSender sends the GetSasDefinition request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetSasDefinitionSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetSasDefinitionResponder handles the response to the GetSasDefinition request. The method always -// closes the http.Response Body. 
-func (client BaseClient) GetSasDefinitionResponder(resp *http.Response) (result SasDefinitionBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetSasDefinitions list storage SAS definitions for the given storage account. This operation requires the -// storage/listsas permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. -// maxresults - maximum number of results to return in a page. If not specified the service will return up to -// 25 results. -func (client BaseClient) GetSasDefinitions(ctx context.Context, vaultBaseURL string, storageAccountName string, maxresults *int32) (result SasDefinitionListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetSasDefinitions") - defer func() { - sc := -1 - if result.sdlr.Response.Response != nil { - sc = result.sdlr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetSasDefinitions", err.Error()) - } - - result.fn = client.getSasDefinitionsNextResults - req, err := client.GetSasDefinitionsPreparer(ctx, vaultBaseURL, storageAccountName, maxresults) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSasDefinitions", nil, "Failure preparing request") - return - } - - resp, err := client.GetSasDefinitionsSender(req) - if err != nil { - result.sdlr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSasDefinitions", resp, "Failure sending request") - return - } - - result.sdlr, err = client.GetSasDefinitionsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSasDefinitions", resp, "Failure responding to request") - return - } - if result.sdlr.hasNextLink() && result.sdlr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// GetSasDefinitionsPreparer prepares the GetSasDefinitions request. 
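GetSasDefinition is a plain single-object read; a minimal assumed wrapper that only adds error context. Both names must match the ^[0-9a-zA-Z]+$ pattern enforced by the validation block above.

package kvexample

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault" // assumed vendor path
)

// getSasDefinition fetches one SAS definition for a managed storage account.
func getSasDefinition(ctx context.Context, client keyvault.BaseClient, vaultURL, storageAccount, sasName string) (keyvault.SasDefinitionBundle, error) {
	bundle, err := client.GetSasDefinition(ctx, vaultURL, storageAccount, sasName)
	if err != nil {
		return keyvault.SasDefinitionBundle{}, fmt.Errorf("get SAS definition %s/%s: %w", storageAccount, sasName, err)
	}
	return bundle, nil
}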
-func (client BaseClient) GetSasDefinitionsPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, maxresults *int32) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/storage/{storage-account-name}/sas", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSasDefinitionsSender sends the GetSasDefinitions request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetSasDefinitionsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetSasDefinitionsResponder handles the response to the GetSasDefinitions request. The method always -// closes the http.Response Body. -func (client BaseClient) GetSasDefinitionsResponder(resp *http.Response) (result SasDefinitionListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getSasDefinitionsNextResults retrieves the next set of results, if any. -func (client BaseClient) getSasDefinitionsNextResults(ctx context.Context, lastResults SasDefinitionListResult) (result SasDefinitionListResult, err error) { - req, err := lastResults.sasDefinitionListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSasDefinitionsNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetSasDefinitionsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSasDefinitionsNextResults", resp, "Failure sending next results request") - } - result, err = client.GetSasDefinitionsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSasDefinitionsNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetSasDefinitionsComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetSasDefinitionsComplete(ctx context.Context, vaultBaseURL string, storageAccountName string, maxresults *int32) (result SasDefinitionListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetSasDefinitions") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetSasDefinitions(ctx, vaultBaseURL, storageAccountName, maxresults) - return -} - -// GetSecret the GET operation is applicable to any secret stored in Azure Key Vault. 
This operation requires the -// secrets/get permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// secretName - the name of the secret. -// secretVersion - the version of the secret. This URI fragment is optional. If not specified, the latest -// version of the secret is returned. -func (client BaseClient) GetSecret(ctx context.Context, vaultBaseURL string, secretName string, secretVersion string) (result SecretBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetSecret") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetSecretPreparer(ctx, vaultBaseURL, secretName, secretVersion) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecret", nil, "Failure preparing request") - return - } - - resp, err := client.GetSecretSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecret", resp, "Failure sending request") - return - } - - result, err = client.GetSecretResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecret", resp, "Failure responding to request") - return - } - - return -} - -// GetSecretPreparer prepares the GetSecret request. -func (client BaseClient) GetSecretPreparer(ctx context.Context, vaultBaseURL string, secretName string, secretVersion string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "secret-name": autorest.Encode("path", secretName), - "secret-version": autorest.Encode("path", secretVersion), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/secrets/{secret-name}/{secret-version}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSecretSender sends the GetSecret request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetSecretSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetSecretResponder handles the response to the GetSecret request. The method always -// closes the http.Response Body. -func (client BaseClient) GetSecretResponder(resp *http.Response) (result SecretBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetSecrets the Get Secrets operation is applicable to the entire vault. However, only the base secret identifier and -// its attributes are provided in the response. Individual secret versions are not listed in the response. This -// operation requires the secrets/list permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// maxresults - maximum number of results to return in a page. 
If not specified, the service will return up to -// 25 results. -func (client BaseClient) GetSecrets(ctx context.Context, vaultBaseURL string, maxresults *int32) (result SecretListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetSecrets") - defer func() { - sc := -1 - if result.slr.Response.Response != nil { - sc = result.slr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetSecrets", err.Error()) - } - - result.fn = client.getSecretsNextResults - req, err := client.GetSecretsPreparer(ctx, vaultBaseURL, maxresults) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecrets", nil, "Failure preparing request") - return - } - - resp, err := client.GetSecretsSender(req) - if err != nil { - result.slr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecrets", resp, "Failure sending request") - return - } - - result.slr, err = client.GetSecretsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecrets", resp, "Failure responding to request") - return - } - if result.slr.hasNextLink() && result.slr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// GetSecretsPreparer prepares the GetSecrets request. -func (client BaseClient) GetSecretsPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/secrets"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSecretsSender sends the GetSecrets request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetSecretsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetSecretsResponder handles the response to the GetSecrets request. The method always -// closes the http.Response Body. -func (client BaseClient) GetSecretsResponder(resp *http.Response) (result SecretListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getSecretsNextResults retrieves the next set of results, if any. 
-func (client BaseClient) getSecretsNextResults(ctx context.Context, lastResults SecretListResult) (result SecretListResult, err error) { - req, err := lastResults.secretListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSecretsNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetSecretsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSecretsNextResults", resp, "Failure sending next results request") - } - result, err = client.GetSecretsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSecretsNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetSecretsComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetSecretsComplete(ctx context.Context, vaultBaseURL string, maxresults *int32) (result SecretListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetSecrets") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetSecrets(ctx, vaultBaseURL, maxresults) - return -} - -// GetSecretVersions the full secret identifier and attributes are provided in the response. No values are returned for -// the secrets. This operations requires the secrets/list permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// secretName - the name of the secret. -// maxresults - maximum number of results to return in a page. If not specified, the service will return up to -// 25 results. 
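Two assumed helpers tying together the secret operations above: GetSecret, where an empty secretVersion selects the latest version, and GetSecretsComplete, whose SecretItem entries carry the base secret identifier in their ID field.

package kvexample

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault" // assumed vendor path
)

// readSecret returns the value of the latest version of a secret.
func readSecret(ctx context.Context, client keyvault.BaseClient, vaultURL, secretName string) (string, error) {
	bundle, err := client.GetSecret(ctx, vaultURL, secretName, "")
	if err != nil {
		return "", err
	}
	if bundle.Value == nil {
		return "", fmt.Errorf("secret %q has no value", secretName)
	}
	return *bundle.Value, nil
}

// listSecretIDs enumerates the base identifiers of all secrets in the vault;
// the LIST operation never returns secret values.
func listSecretIDs(ctx context.Context, client keyvault.BaseClient, vaultURL string) ([]string, error) {
	iter, err := client.GetSecretsComplete(ctx, vaultURL, nil)
	if err != nil {
		return nil, err
	}
	var ids []string
	for iter.NotDone() {
		if id := iter.Value().ID; id != nil { // SecretItem.ID is a *string
			ids = append(ids, *id)
		}
		if err := iter.NextWithContext(ctx); err != nil {
			return ids, err
		}
	}
	return ids, nil
}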
-func (client BaseClient) GetSecretVersions(ctx context.Context, vaultBaseURL string, secretName string, maxresults *int32) (result SecretListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetSecretVersions") - defer func() { - sc := -1 - if result.slr.Response.Response != nil { - sc = result.slr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetSecretVersions", err.Error()) - } - - result.fn = client.getSecretVersionsNextResults - req, err := client.GetSecretVersionsPreparer(ctx, vaultBaseURL, secretName, maxresults) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecretVersions", nil, "Failure preparing request") - return - } - - resp, err := client.GetSecretVersionsSender(req) - if err != nil { - result.slr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecretVersions", resp, "Failure sending request") - return - } - - result.slr, err = client.GetSecretVersionsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecretVersions", resp, "Failure responding to request") - return - } - if result.slr.hasNextLink() && result.slr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// GetSecretVersionsPreparer prepares the GetSecretVersions request. -func (client BaseClient) GetSecretVersionsPreparer(ctx context.Context, vaultBaseURL string, secretName string, maxresults *int32) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "secret-name": autorest.Encode("path", secretName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/secrets/{secret-name}/versions", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSecretVersionsSender sends the GetSecretVersions request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetSecretVersionsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetSecretVersionsResponder handles the response to the GetSecretVersions request. The method always -// closes the http.Response Body. 
-func (client BaseClient) GetSecretVersionsResponder(resp *http.Response) (result SecretListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getSecretVersionsNextResults retrieves the next set of results, if any. -func (client BaseClient) getSecretVersionsNextResults(ctx context.Context, lastResults SecretListResult) (result SecretListResult, err error) { - req, err := lastResults.secretListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSecretVersionsNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetSecretVersionsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSecretVersionsNextResults", resp, "Failure sending next results request") - } - result, err = client.GetSecretVersionsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSecretVersionsNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetSecretVersionsComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetSecretVersionsComplete(ctx context.Context, vaultBaseURL string, secretName string, maxresults *int32) (result SecretListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetSecretVersions") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetSecretVersions(ctx, vaultBaseURL, secretName, maxresults) - return -} - -// GetStorageAccount gets information about a specified storage account. This operation requires the storage/get -// permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. 
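Secret versions enumerate the same way; as the doc comment above notes, only identifiers and attributes are returned, never values. An assumed helper:

package kvexample

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault" // assumed vendor path
)

// listSecretVersionIDs returns the versioned identifiers of one secret.
func listSecretVersionIDs(ctx context.Context, client keyvault.BaseClient, vaultURL, secretName string) ([]string, error) {
	iter, err := client.GetSecretVersionsComplete(ctx, vaultURL, secretName, nil)
	if err != nil {
		return nil, err
	}
	var ids []string
	for iter.NotDone() {
		if id := iter.Value().ID; id != nil {
			ids = append(ids, *id)
		}
		if err := iter.NextWithContext(ctx); err != nil {
			return ids, err
		}
	}
	return ids, nil
}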
-func (client BaseClient) GetStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string) (result StorageBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetStorageAccount") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetStorageAccount", err.Error()) - } - - req, err := client.GetStorageAccountPreparer(ctx, vaultBaseURL, storageAccountName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetStorageAccount", nil, "Failure preparing request") - return - } - - resp, err := client.GetStorageAccountSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetStorageAccount", resp, "Failure sending request") - return - } - - result, err = client.GetStorageAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetStorageAccount", resp, "Failure responding to request") - return - } - - return -} - -// GetStorageAccountPreparer prepares the GetStorageAccount request. -func (client BaseClient) GetStorageAccountPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/storage/{storage-account-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetStorageAccountSender sends the GetStorageAccount request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetStorageAccountSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetStorageAccountResponder handles the response to the GetStorageAccount request. The method always -// closes the http.Response Body. -func (client BaseClient) GetStorageAccountResponder(resp *http.Response) (result StorageBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetStorageAccounts list storage accounts managed by the specified key vault. This operation requires the -// storage/list permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// maxresults - maximum number of results to return in a page. If not specified the service will return up to -// 25 results. 
-func (client BaseClient) GetStorageAccounts(ctx context.Context, vaultBaseURL string, maxresults *int32) (result StorageListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetStorageAccounts") - defer func() { - sc := -1 - if result.slr.Response.Response != nil { - sc = result.slr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetStorageAccounts", err.Error()) - } - - result.fn = client.getStorageAccountsNextResults - req, err := client.GetStorageAccountsPreparer(ctx, vaultBaseURL, maxresults) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetStorageAccounts", nil, "Failure preparing request") - return - } - - resp, err := client.GetStorageAccountsSender(req) - if err != nil { - result.slr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetStorageAccounts", resp, "Failure sending request") - return - } - - result.slr, err = client.GetStorageAccountsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetStorageAccounts", resp, "Failure responding to request") - return - } - if result.slr.hasNextLink() && result.slr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// GetStorageAccountsPreparer prepares the GetStorageAccounts request. -func (client BaseClient) GetStorageAccountsPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/storage"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetStorageAccountsSender sends the GetStorageAccounts request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetStorageAccountsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetStorageAccountsResponder handles the response to the GetStorageAccounts request. The method always -// closes the http.Response Body. -func (client BaseClient) GetStorageAccountsResponder(resp *http.Response) (result StorageListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getStorageAccountsNextResults retrieves the next set of results, if any. 
-func (client BaseClient) getStorageAccountsNextResults(ctx context.Context, lastResults StorageListResult) (result StorageListResult, err error) { - req, err := lastResults.storageListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getStorageAccountsNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetStorageAccountsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getStorageAccountsNextResults", resp, "Failure sending next results request") - } - result, err = client.GetStorageAccountsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getStorageAccountsNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetStorageAccountsComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetStorageAccountsComplete(ctx context.Context, vaultBaseURL string, maxresults *int32) (result StorageListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetStorageAccounts") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetStorageAccounts(ctx, vaultBaseURL, maxresults) - return -} - -// ImportCertificate imports an existing valid certificate, containing a private key, into Azure Key Vault. The -// certificate to be imported can be in either PFX or PEM format. If the certificate is in PEM format the PEM file must -// contain the key as well as x509 certificates. This operation requires the certificates/import permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate. -// parameters - the parameters to import the certificate. 
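To consume the paged listing without handling `nextLink` by hand, the `*Complete` wrapper above returns an iterator. A sketch, under the same assumed setup as the first example:

```go
// listStorageAccounts prints the IDs of all storage accounts managed by the
// vault, crossing page boundaries via the generated iterator.
func listStorageAccounts(ctx context.Context, client keyvault.BaseClient, vaultURL string) error {
	it, err := client.GetStorageAccountsComplete(ctx, vaultURL, nil) // nil maxresults: service default (up to 25 per page)
	if err != nil {
		return err
	}
	for it.NotDone() {
		if id := it.Value().ID; id != nil {
			fmt.Println(*id)
		}
		if err := it.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}
```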
-func (client BaseClient) ImportCertificate(ctx context.Context, vaultBaseURL string, certificateName string, parameters CertificateImportParameters) (result CertificateBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.ImportCertificate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: certificateName, - Constraints: []validation.Constraint{{Target: "certificateName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z-]+$`, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Base64EncodedCertificate", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.CertificatePolicy", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.CertificatePolicy.X509CertificateProperties", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.CertificatePolicy.X509CertificateProperties.ValidityInMonths", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.CertificatePolicy.X509CertificateProperties.ValidityInMonths", Name: validation.InclusiveMinimum, Rule: int64(0), Chain: nil}}}, - }}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "ImportCertificate", err.Error()) - } - - req, err := client.ImportCertificatePreparer(ctx, vaultBaseURL, certificateName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "ImportCertificate", nil, "Failure preparing request") - return - } - - resp, err := client.ImportCertificateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "ImportCertificate", resp, "Failure sending request") - return - } - - result, err = client.ImportCertificateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "ImportCertificate", resp, "Failure responding to request") - return - } - - return -} - -// ImportCertificatePreparer prepares the ImportCertificate request. -func (client BaseClient) ImportCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string, parameters CertificateImportParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/import", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ImportCertificateSender sends the ImportCertificate request. The method will close the -// http.Response Body if it receives an error. 
-func (client BaseClient) ImportCertificateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// ImportCertificateResponder handles the response to the ImportCertificate request. The method always -// closes the http.Response Body. -func (client BaseClient) ImportCertificateResponder(resp *http.Response) (result CertificateBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ImportKey the import key operation may be used to import any key type into an Azure Key Vault. If the named key -// already exists, Azure Key Vault creates a new version of the key. This operation requires the keys/import -// permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - name for the imported key. -// parameters - the parameters to import a key. -func (client BaseClient) ImportKey(ctx context.Context, vaultBaseURL string, keyName string, parameters KeyImportParameters) (result KeyBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.ImportKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: keyName, - Constraints: []validation.Constraint{{Target: "keyName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z-]+$`, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Key", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "ImportKey", err.Error()) - } - - req, err := client.ImportKeyPreparer(ctx, vaultBaseURL, keyName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "ImportKey", nil, "Failure preparing request") - return - } - - resp, err := client.ImportKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "ImportKey", resp, "Failure sending request") - return - } - - result, err = client.ImportKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "ImportKey", resp, "Failure responding to request") - return - } - - return -} - -// ImportKeyPreparer prepares the ImportKey request. 
-func (client BaseClient) ImportKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string, parameters KeyImportParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ImportKeySender sends the ImportKey request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) ImportKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// ImportKeyResponder handles the response to the ImportKey request. The method always -// closes the http.Response Body. -func (client BaseClient) ImportKeyResponder(resp *http.Response) (result KeyBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// MergeCertificate the MergeCertificate operation performs the merging of a certificate or certificate chain with a -// key pair currently available in the service. This operation requires the certificates/create permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate. -// parameters - the parameters to merge certificate. 
-func (client BaseClient) MergeCertificate(ctx context.Context, vaultBaseURL string, certificateName string, parameters CertificateMergeParameters) (result CertificateBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.MergeCertificate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.X509Certificates", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "MergeCertificate", err.Error()) - } - - req, err := client.MergeCertificatePreparer(ctx, vaultBaseURL, certificateName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "MergeCertificate", nil, "Failure preparing request") - return - } - - resp, err := client.MergeCertificateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "MergeCertificate", resp, "Failure sending request") - return - } - - result, err = client.MergeCertificateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "MergeCertificate", resp, "Failure responding to request") - return - } - - return -} - -// MergeCertificatePreparer prepares the MergeCertificate request. -func (client BaseClient) MergeCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string, parameters CertificateMergeParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/pending/merge", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// MergeCertificateSender sends the MergeCertificate request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) MergeCertificateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// MergeCertificateResponder handles the response to the MergeCertificate request. The method always -// closes the http.Response Body. -func (client BaseClient) MergeCertificateResponder(resp *http.Response) (result CertificateBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// PurgeDeletedCertificate the PurgeDeletedCertificate operation performs an irreversible deletion of the specified -// certificate, without possibility for recovery. 
The operation is not available if the recovery level does not specify -// 'Purgeable'. This operation requires the certificate/purge permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate -func (client BaseClient) PurgeDeletedCertificate(ctx context.Context, vaultBaseURL string, certificateName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.PurgeDeletedCertificate") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.PurgeDeletedCertificatePreparer(ctx, vaultBaseURL, certificateName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedCertificate", nil, "Failure preparing request") - return - } - - resp, err := client.PurgeDeletedCertificateSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedCertificate", resp, "Failure sending request") - return - } - - result, err = client.PurgeDeletedCertificateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedCertificate", resp, "Failure responding to request") - return - } - - return -} - -// PurgeDeletedCertificatePreparer prepares the PurgeDeletedCertificate request. -func (client BaseClient) PurgeDeletedCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedcertificates/{certificate-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// PurgeDeletedCertificateSender sends the PurgeDeletedCertificate request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) PurgeDeletedCertificateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// PurgeDeletedCertificateResponder handles the response to the PurgeDeletedCertificate request. The method always -// closes the http.Response Body. -func (client BaseClient) PurgeDeletedCertificateResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// PurgeDeletedKey the Purge Deleted Key operation is applicable for soft-delete enabled vaults. While the operation -// can be invoked on any vault, it will return an error if invoked on a non soft-delete enabled vault. This operation -// requires the keys/purge permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. 
-// keyName - the name of the key -func (client BaseClient) PurgeDeletedKey(ctx context.Context, vaultBaseURL string, keyName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.PurgeDeletedKey") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.PurgeDeletedKeyPreparer(ctx, vaultBaseURL, keyName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedKey", nil, "Failure preparing request") - return - } - - resp, err := client.PurgeDeletedKeySender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedKey", resp, "Failure sending request") - return - } - - result, err = client.PurgeDeletedKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedKey", resp, "Failure responding to request") - return - } - - return -} - -// PurgeDeletedKeyPreparer prepares the PurgeDeletedKey request. -func (client BaseClient) PurgeDeletedKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedkeys/{key-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// PurgeDeletedKeySender sends the PurgeDeletedKey request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) PurgeDeletedKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// PurgeDeletedKeyResponder handles the response to the PurgeDeletedKey request. The method always -// closes the http.Response Body. -func (client BaseClient) PurgeDeletedKeyResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// PurgeDeletedSecret the purge deleted secret operation removes the secret permanently, without the possibility of -// recovery. This operation can only be enabled on a soft-delete enabled vault. This operation requires the -// secrets/purge permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// secretName - the name of the secret. 
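The purge operations return a bare `autorest.Response` rather than a typed bundle. A hedged sketch of purging a deleted key (soft-delete enabled vaults only, and irreversible), assuming the client from the first example:

```go
// purgeDeletedKey permanently removes a soft-deleted key; this cannot be
// undone and requires the keys/purge permission.
func purgeDeletedKey(ctx context.Context, client keyvault.BaseClient, vaultURL, keyName string) error {
	resp, err := client.PurgeDeletedKey(ctx, vaultURL, keyName)
	if err != nil {
		return err
	}
	if resp.Response != nil {
		fmt.Println("purge status:", resp.StatusCode) // typically 204 No Content or 200 OK
	}
	return nil
}
```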
-func (client BaseClient) PurgeDeletedSecret(ctx context.Context, vaultBaseURL string, secretName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.PurgeDeletedSecret") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.PurgeDeletedSecretPreparer(ctx, vaultBaseURL, secretName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedSecret", nil, "Failure preparing request") - return - } - - resp, err := client.PurgeDeletedSecretSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedSecret", resp, "Failure sending request") - return - } - - result, err = client.PurgeDeletedSecretResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedSecret", resp, "Failure responding to request") - return - } - - return -} - -// PurgeDeletedSecretPreparer prepares the PurgeDeletedSecret request. -func (client BaseClient) PurgeDeletedSecretPreparer(ctx context.Context, vaultBaseURL string, secretName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "secret-name": autorest.Encode("path", secretName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedsecrets/{secret-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// PurgeDeletedSecretSender sends the PurgeDeletedSecret request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) PurgeDeletedSecretSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// PurgeDeletedSecretResponder handles the response to the PurgeDeletedSecret request. The method always -// closes the http.Response Body. -func (client BaseClient) PurgeDeletedSecretResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// PurgeDeletedStorageAccount the purge deleted storage account operation removes the secret permanently, without the -// possibility of recovery. This operation can only be performed on a soft-delete enabled vault. This operation -// requires the storage/purge permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. 
-func (client BaseClient) PurgeDeletedStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.PurgeDeletedStorageAccount") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "PurgeDeletedStorageAccount", err.Error()) - } - - req, err := client.PurgeDeletedStorageAccountPreparer(ctx, vaultBaseURL, storageAccountName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedStorageAccount", nil, "Failure preparing request") - return - } - - resp, err := client.PurgeDeletedStorageAccountSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedStorageAccount", resp, "Failure sending request") - return - } - - result, err = client.PurgeDeletedStorageAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedStorageAccount", resp, "Failure responding to request") - return - } - - return -} - -// PurgeDeletedStorageAccountPreparer prepares the PurgeDeletedStorageAccount request. -func (client BaseClient) PurgeDeletedStorageAccountPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedstorage/{storage-account-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// PurgeDeletedStorageAccountSender sends the PurgeDeletedStorageAccount request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) PurgeDeletedStorageAccountSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// PurgeDeletedStorageAccountResponder handles the response to the PurgeDeletedStorageAccount request. The method always -// closes the http.Response Body. -func (client BaseClient) PurgeDeletedStorageAccountResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// RecoverDeletedCertificate the RecoverDeletedCertificate operation performs the reversal of the Delete operation. The -// operation is applicable in vaults enabled for soft-delete, and must be issued during the retention interval -// (available in the deleted certificate's attributes). 
This operation requires the certificates/recover permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the deleted certificate -func (client BaseClient) RecoverDeletedCertificate(ctx context.Context, vaultBaseURL string, certificateName string) (result CertificateBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RecoverDeletedCertificate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.RecoverDeletedCertificatePreparer(ctx, vaultBaseURL, certificateName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedCertificate", nil, "Failure preparing request") - return - } - - resp, err := client.RecoverDeletedCertificateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedCertificate", resp, "Failure sending request") - return - } - - result, err = client.RecoverDeletedCertificateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedCertificate", resp, "Failure responding to request") - return - } - - return -} - -// RecoverDeletedCertificatePreparer prepares the RecoverDeletedCertificate request. -func (client BaseClient) RecoverDeletedCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedcertificates/{certificate-name}/recover", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// RecoverDeletedCertificateSender sends the RecoverDeletedCertificate request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) RecoverDeletedCertificateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// RecoverDeletedCertificateResponder handles the response to the RecoverDeletedCertificate request. The method always -// closes the http.Response Body. -func (client BaseClient) RecoverDeletedCertificateResponder(resp *http.Response) (result CertificateBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// RecoverDeletedKey the Recover Deleted Key operation is applicable for deleted keys in soft-delete enabled vaults. It -// recovers the deleted key back to its latest version under /keys. An attempt to recover an non-deleted key will -// return an error. Consider this the inverse of the delete operation on soft-delete enabled vaults. 
This operation -// requires the keys/recover permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the deleted key. -func (client BaseClient) RecoverDeletedKey(ctx context.Context, vaultBaseURL string, keyName string) (result KeyBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RecoverDeletedKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.RecoverDeletedKeyPreparer(ctx, vaultBaseURL, keyName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedKey", nil, "Failure preparing request") - return - } - - resp, err := client.RecoverDeletedKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedKey", resp, "Failure sending request") - return - } - - result, err = client.RecoverDeletedKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedKey", resp, "Failure responding to request") - return - } - - return -} - -// RecoverDeletedKeyPreparer prepares the RecoverDeletedKey request. -func (client BaseClient) RecoverDeletedKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedkeys/{key-name}/recover", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// RecoverDeletedKeySender sends the RecoverDeletedKey request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) RecoverDeletedKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// RecoverDeletedKeyResponder handles the response to the RecoverDeletedKey request. The method always -// closes the http.Response Body. -func (client BaseClient) RecoverDeletedKeyResponder(resp *http.Response) (result KeyBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// RecoverDeletedSasDefinition recovers the deleted SAS definition for the specified storage account. This operation -// can only be performed on a soft-delete enabled vault. This operation requires the storage/recover permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. -// sasDefinitionName - the name of the SAS definition. 
-func (client BaseClient) RecoverDeletedSasDefinition(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (result SasDefinitionBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RecoverDeletedSasDefinition") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, - {TargetValue: sasDefinitionName, - Constraints: []validation.Constraint{{Target: "sasDefinitionName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "RecoverDeletedSasDefinition", err.Error()) - } - - req, err := client.RecoverDeletedSasDefinitionPreparer(ctx, vaultBaseURL, storageAccountName, sasDefinitionName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedSasDefinition", nil, "Failure preparing request") - return - } - - resp, err := client.RecoverDeletedSasDefinitionSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedSasDefinition", resp, "Failure sending request") - return - } - - result, err = client.RecoverDeletedSasDefinitionResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedSasDefinition", resp, "Failure responding to request") - return - } - - return -} - -// RecoverDeletedSasDefinitionPreparer prepares the RecoverDeletedSasDefinition request. -func (client BaseClient) RecoverDeletedSasDefinitionPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "sas-definition-name": autorest.Encode("path", sasDefinitionName), - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedstorage/{storage-account-name}/sas/{sas-definition-name}/recover", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// RecoverDeletedSasDefinitionSender sends the RecoverDeletedSasDefinition request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) RecoverDeletedSasDefinitionSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// RecoverDeletedSasDefinitionResponder handles the response to the RecoverDeletedSasDefinition request. The method always -// closes the http.Response Body. 
-func (client BaseClient) RecoverDeletedSasDefinitionResponder(resp *http.Response) (result SasDefinitionBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// RecoverDeletedSecret recovers the deleted secret in the specified vault. This operation can only be performed on a -// soft-delete enabled vault. This operation requires the secrets/recover permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// secretName - the name of the deleted secret. -func (client BaseClient) RecoverDeletedSecret(ctx context.Context, vaultBaseURL string, secretName string) (result SecretBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RecoverDeletedSecret") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.RecoverDeletedSecretPreparer(ctx, vaultBaseURL, secretName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedSecret", nil, "Failure preparing request") - return - } - - resp, err := client.RecoverDeletedSecretSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedSecret", resp, "Failure sending request") - return - } - - result, err = client.RecoverDeletedSecretResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedSecret", resp, "Failure responding to request") - return - } - - return -} - -// RecoverDeletedSecretPreparer prepares the RecoverDeletedSecret request. -func (client BaseClient) RecoverDeletedSecretPreparer(ctx context.Context, vaultBaseURL string, secretName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "secret-name": autorest.Encode("path", secretName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedsecrets/{secret-name}/recover", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// RecoverDeletedSecretSender sends the RecoverDeletedSecret request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) RecoverDeletedSecretSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// RecoverDeletedSecretResponder handles the response to the RecoverDeletedSecret request. The method always -// closes the http.Response Body. 
-func (client BaseClient) RecoverDeletedSecretResponder(resp *http.Response) (result SecretBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// RecoverDeletedStorageAccount recovers the deleted storage account in the specified vault. This operation can only be -// performed on a soft-delete enabled vault. This operation requires the storage/recover permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. -func (client BaseClient) RecoverDeletedStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string) (result StorageBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RecoverDeletedStorageAccount") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "RecoverDeletedStorageAccount", err.Error()) - } - - req, err := client.RecoverDeletedStorageAccountPreparer(ctx, vaultBaseURL, storageAccountName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedStorageAccount", nil, "Failure preparing request") - return - } - - resp, err := client.RecoverDeletedStorageAccountSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedStorageAccount", resp, "Failure sending request") - return - } - - result, err = client.RecoverDeletedStorageAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedStorageAccount", resp, "Failure responding to request") - return - } - - return -} - -// RecoverDeletedStorageAccountPreparer prepares the RecoverDeletedStorageAccount request. -func (client BaseClient) RecoverDeletedStorageAccountPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedstorage/{storage-account-name}/recover", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// RecoverDeletedStorageAccountSender sends the RecoverDeletedStorageAccount request. The method will close the -// http.Response Body if it receives an error. 
-func (client BaseClient) RecoverDeletedStorageAccountSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// RecoverDeletedStorageAccountResponder handles the response to the RecoverDeletedStorageAccount request. The method always -// closes the http.Response Body. -func (client BaseClient) RecoverDeletedStorageAccountResponder(resp *http.Response) (result StorageBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// RegenerateStorageAccountKey regenerates the specified key value for the given storage account. This operation -// requires the storage/regeneratekey permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. -// parameters - the parameters to regenerate storage account key. -func (client BaseClient) RegenerateStorageAccountKey(ctx context.Context, vaultBaseURL string, storageAccountName string, parameters StorageAccountRegenerteKeyParameters) (result StorageBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RegenerateStorageAccountKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.KeyName", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "RegenerateStorageAccountKey", err.Error()) - } - - req, err := client.RegenerateStorageAccountKeyPreparer(ctx, vaultBaseURL, storageAccountName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RegenerateStorageAccountKey", nil, "Failure preparing request") - return - } - - resp, err := client.RegenerateStorageAccountKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RegenerateStorageAccountKey", resp, "Failure sending request") - return - } - - result, err = client.RegenerateStorageAccountKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RegenerateStorageAccountKey", resp, "Failure responding to request") - return - } - - return -} - -// RegenerateStorageAccountKeyPreparer prepares the RegenerateStorageAccountKey request. 
-func (client BaseClient) RegenerateStorageAccountKeyPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, parameters StorageAccountRegenerteKeyParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/storage/{storage-account-name}/regeneratekey", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// RegenerateStorageAccountKeySender sends the RegenerateStorageAccountKey request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) RegenerateStorageAccountKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// RegenerateStorageAccountKeyResponder handles the response to the RegenerateStorageAccountKey request. The method always -// closes the http.Response Body. -func (client BaseClient) RegenerateStorageAccountKeyResponder(resp *http.Response) (result StorageBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// RestoreCertificate restores a backed up certificate, and all its versions, to a vault. This operation requires the -// certificates/restore permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// parameters - the parameters to restore the certificate. 
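Key regeneration takes a small parameters struct whose only required field is the key name; treating that field as a `*string` is an assumption about the generated models, and `StorageAccountRegenerteKeyParameters` is the type name exactly as generated (sic). Sketch, same assumed setup:

```go
// regenerateStorageKey rotates one of the keys ("key1" or "key2") of a
// vault-managed storage account; requires the storage/regeneratekey permission.
func regenerateStorageKey(ctx context.Context, client keyvault.BaseClient, vaultURL, account, keyName string) error {
	params := keyvault.StorageAccountRegenerteKeyParameters{
		KeyName: &keyName, // assumed *string; required per the validation above
	}
	_, err := client.RegenerateStorageAccountKey(ctx, vaultURL, account, params)
	return err
}
```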
-func (client BaseClient) RestoreCertificate(ctx context.Context, vaultBaseURL string, parameters CertificateRestoreParameters) (result CertificateBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RestoreCertificate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.CertificateBundleBackup", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "RestoreCertificate", err.Error()) - } - - req, err := client.RestoreCertificatePreparer(ctx, vaultBaseURL, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreCertificate", nil, "Failure preparing request") - return - } - - resp, err := client.RestoreCertificateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreCertificate", resp, "Failure sending request") - return - } - - result, err = client.RestoreCertificateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreCertificate", resp, "Failure responding to request") - return - } - - return -} - -// RestoreCertificatePreparer prepares the RestoreCertificate request. -func (client BaseClient) RestoreCertificatePreparer(ctx context.Context, vaultBaseURL string, parameters CertificateRestoreParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/certificates/restore"), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// RestoreCertificateSender sends the RestoreCertificate request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) RestoreCertificateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// RestoreCertificateResponder handles the response to the RestoreCertificate request. The method always -// closes the http.Response Body. -func (client BaseClient) RestoreCertificateResponder(resp *http.Response) (result CertificateBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// RestoreKey imports a previously backed up key into Azure Key Vault, restoring the key, its key identifier, -// attributes and access control policies. The RESTORE operation may be used to import a previously backed up key. -// Individual versions of a key cannot be restored. The key is restored in its entirety with the same key name as it -// had when it was backed up. 
If the key name is not available in the target Key Vault, the RESTORE operation will be -// rejected. While the key name is retained during restore, the final key identifier will change if the key is restored -// to a different vault. Restore will restore all versions and preserve version identifiers. The RESTORE operation is -// subject to security constraints: The target Key Vault must be owned by the same Microsoft Azure Subscription as the -// source Key Vault The user must have RESTORE permission in the target Key Vault. This operation requires the -// keys/restore permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// parameters - the parameters to restore the key. -func (client BaseClient) RestoreKey(ctx context.Context, vaultBaseURL string, parameters KeyRestoreParameters) (result KeyBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RestoreKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.KeyBundleBackup", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "RestoreKey", err.Error()) - } - - req, err := client.RestoreKeyPreparer(ctx, vaultBaseURL, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreKey", nil, "Failure preparing request") - return - } - - resp, err := client.RestoreKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreKey", resp, "Failure sending request") - return - } - - result, err = client.RestoreKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreKey", resp, "Failure responding to request") - return - } - - return -} - -// RestoreKeyPreparer prepares the RestoreKey request. -func (client BaseClient) RestoreKeyPreparer(ctx context.Context, vaultBaseURL string, parameters KeyRestoreParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/keys/restore"), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// RestoreKeySender sends the RestoreKey request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) RestoreKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// RestoreKeyResponder handles the response to the RestoreKey request. The method always -// closes the http.Response Body. 
-func (client BaseClient) RestoreKeyResponder(resp *http.Response) (result KeyBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// RestoreSecret restores a backed up secret, and all its versions, to a vault. This operation requires the -// secrets/restore permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// parameters - the parameters to restore the secret. -func (client BaseClient) RestoreSecret(ctx context.Context, vaultBaseURL string, parameters SecretRestoreParameters) (result SecretBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RestoreSecret") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.SecretBundleBackup", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "RestoreSecret", err.Error()) - } - - req, err := client.RestoreSecretPreparer(ctx, vaultBaseURL, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreSecret", nil, "Failure preparing request") - return - } - - resp, err := client.RestoreSecretSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreSecret", resp, "Failure sending request") - return - } - - result, err = client.RestoreSecretResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreSecret", resp, "Failure responding to request") - return - } - - return -} - -// RestoreSecretPreparer prepares the RestoreSecret request. -func (client BaseClient) RestoreSecretPreparer(ctx context.Context, vaultBaseURL string, parameters SecretRestoreParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/secrets/restore"), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// RestoreSecretSender sends the RestoreSecret request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) RestoreSecretSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// RestoreSecretResponder handles the response to the RestoreSecret request. The method always -// closes the http.Response Body. 
-func (client BaseClient) RestoreSecretResponder(resp *http.Response) (result SecretBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// RestoreStorageAccount restores a backed up storage account to a vault. This operation requires the storage/restore -// permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// parameters - the parameters to restore the storage account. -func (client BaseClient) RestoreStorageAccount(ctx context.Context, vaultBaseURL string, parameters StorageRestoreParameters) (result StorageBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RestoreStorageAccount") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.StorageBundleBackup", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "RestoreStorageAccount", err.Error()) - } - - req, err := client.RestoreStorageAccountPreparer(ctx, vaultBaseURL, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreStorageAccount", nil, "Failure preparing request") - return - } - - resp, err := client.RestoreStorageAccountSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreStorageAccount", resp, "Failure sending request") - return - } - - result, err = client.RestoreStorageAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreStorageAccount", resp, "Failure responding to request") - return - } - - return -} - -// RestoreStorageAccountPreparer prepares the RestoreStorageAccount request. -func (client BaseClient) RestoreStorageAccountPreparer(ctx context.Context, vaultBaseURL string, parameters StorageRestoreParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/storage/restore"), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// RestoreStorageAccountSender sends the RestoreStorageAccount request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) RestoreStorageAccountSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// RestoreStorageAccountResponder handles the response to the RestoreStorageAccount request. The method always -// closes the http.Response Body. 
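The restore operations above all share one shape: a required *BundleBackup field checked client-side, then a POST to the matching /…/restore path and a JSON response unmarshalled into the corresponding bundle. A minimal caller sketch for RestoreSecret follows; the import path, package name and helper are hypothetical, while the method signature and the non-nil SecretBundleBackup requirement come from the generated code above.

// Hypothetical caller, not part of the vendored code shown in this diff.
package kvexample

import (
	"context"
	"fmt"

	// Assumed vendored path for the generated client in this diff.
	"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault"
)

// restoreSecret replays a secret backup into a vault. params.SecretBundleBackup
// (the blob produced by a prior BackupSecret call) must be non-nil; otherwise the
// client-side validation in RestoreSecret rejects the call before any request is sent.
func restoreSecret(ctx context.Context, client keyvault.BaseClient, vaultURL string, params keyvault.SecretRestoreParameters) error {
	bundle, err := client.RestoreSecret(ctx, vaultURL, params)
	if err != nil {
		return fmt.Errorf("restoring secret: %w", err)
	}
	fmt.Printf("restored secret bundle: %+v\n", bundle)
	return nil
}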
-func (client BaseClient) RestoreStorageAccountResponder(resp *http.Response) (result StorageBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// SetCertificateContacts sets the certificate contacts for the specified key vault. This operation requires the -// certificates/managecontacts permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// contacts - the contacts for the key vault certificate. -func (client BaseClient) SetCertificateContacts(ctx context.Context, vaultBaseURL string, contacts Contacts) (result Contacts, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.SetCertificateContacts") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.SetCertificateContactsPreparer(ctx, vaultBaseURL, contacts) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetCertificateContacts", nil, "Failure preparing request") - return - } - - resp, err := client.SetCertificateContactsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetCertificateContacts", resp, "Failure sending request") - return - } - - result, err = client.SetCertificateContactsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetCertificateContacts", resp, "Failure responding to request") - return - } - - return -} - -// SetCertificateContactsPreparer prepares the SetCertificateContacts request. -func (client BaseClient) SetCertificateContactsPreparer(ctx context.Context, vaultBaseURL string, contacts Contacts) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - contacts.ID = nil - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/certificates/contacts"), - autorest.WithJSON(contacts), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// SetCertificateContactsSender sends the SetCertificateContacts request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) SetCertificateContactsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// SetCertificateContactsResponder handles the response to the SetCertificateContacts request. The method always -// closes the http.Response Body. 
-func (client BaseClient) SetCertificateContactsResponder(resp *http.Response) (result Contacts, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// SetCertificateIssuer the SetCertificateIssuer operation adds or updates the specified certificate issuer. This -// operation requires the certificates/setissuers permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// issuerName - the name of the issuer. -// parameter - certificate issuer set parameter. -func (client BaseClient) SetCertificateIssuer(ctx context.Context, vaultBaseURL string, issuerName string, parameter CertificateIssuerSetParameters) (result IssuerBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.SetCertificateIssuer") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameter, - Constraints: []validation.Constraint{{Target: "parameter.Provider", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "SetCertificateIssuer", err.Error()) - } - - req, err := client.SetCertificateIssuerPreparer(ctx, vaultBaseURL, issuerName, parameter) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetCertificateIssuer", nil, "Failure preparing request") - return - } - - resp, err := client.SetCertificateIssuerSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetCertificateIssuer", resp, "Failure sending request") - return - } - - result, err = client.SetCertificateIssuerResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetCertificateIssuer", resp, "Failure responding to request") - return - } - - return -} - -// SetCertificateIssuerPreparer prepares the SetCertificateIssuer request. -func (client BaseClient) SetCertificateIssuerPreparer(ctx context.Context, vaultBaseURL string, issuerName string, parameter CertificateIssuerSetParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "issuer-name": autorest.Encode("path", issuerName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/issuers/{issuer-name}", pathParameters), - autorest.WithJSON(parameter), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// SetCertificateIssuerSender sends the SetCertificateIssuer request. The method will close the -// http.Response Body if it receives an error. 
-func (client BaseClient) SetCertificateIssuerSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// SetCertificateIssuerResponder handles the response to the SetCertificateIssuer request. The method always -// closes the http.Response Body. -func (client BaseClient) SetCertificateIssuerResponder(resp *http.Response) (result IssuerBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// SetSasDefinition creates or updates a new SAS definition for the specified storage account. This operation requires -// the storage/setsas permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. -// sasDefinitionName - the name of the SAS definition. -// parameters - the parameters to create a SAS definition. -func (client BaseClient) SetSasDefinition(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string, parameters SasDefinitionCreateParameters) (result SasDefinitionBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.SetSasDefinition") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, - {TargetValue: sasDefinitionName, - Constraints: []validation.Constraint{{Target: "sasDefinitionName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.TemplateURI", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.ValidityPeriod", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "SetSasDefinition", err.Error()) - } - - req, err := client.SetSasDefinitionPreparer(ctx, vaultBaseURL, storageAccountName, sasDefinitionName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetSasDefinition", nil, "Failure preparing request") - return - } - - resp, err := client.SetSasDefinitionSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetSasDefinition", resp, "Failure sending request") - return - } - - result, err = client.SetSasDefinitionResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetSasDefinition", resp, "Failure responding to request") - return - } - - return -} - -// SetSasDefinitionPreparer prepares the SetSasDefinition request. 
-func (client BaseClient) SetSasDefinitionPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string, parameters SasDefinitionCreateParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "sas-definition-name": autorest.Encode("path", sasDefinitionName), - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/storage/{storage-account-name}/sas/{sas-definition-name}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// SetSasDefinitionSender sends the SetSasDefinition request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) SetSasDefinitionSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// SetSasDefinitionResponder handles the response to the SetSasDefinition request. The method always -// closes the http.Response Body. -func (client BaseClient) SetSasDefinitionResponder(resp *http.Response) (result SasDefinitionBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// SetSecret the SET operation adds a secret to the Azure Key Vault. If the named secret already exists, Azure Key -// Vault creates a new version of that secret. This operation requires the secrets/set permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// secretName - the name of the secret. -// parameters - the parameters for setting the secret. 
-func (client BaseClient) SetSecret(ctx context.Context, vaultBaseURL string, secretName string, parameters SecretSetParameters) (result SecretBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.SetSecret") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: secretName, - Constraints: []validation.Constraint{{Target: "secretName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z-]+$`, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Value", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "SetSecret", err.Error()) - } - - req, err := client.SetSecretPreparer(ctx, vaultBaseURL, secretName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetSecret", nil, "Failure preparing request") - return - } - - resp, err := client.SetSecretSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetSecret", resp, "Failure sending request") - return - } - - result, err = client.SetSecretResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetSecret", resp, "Failure responding to request") - return - } - - return -} - -// SetSecretPreparer prepares the SetSecret request. -func (client BaseClient) SetSecretPreparer(ctx context.Context, vaultBaseURL string, secretName string, parameters SecretSetParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "secret-name": autorest.Encode("path", secretName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/secrets/{secret-name}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// SetSecretSender sends the SetSecret request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) SetSecretSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// SetSecretResponder handles the response to the SetSecret request. The method always -// closes the http.Response Body. -func (client BaseClient) SetSecretResponder(resp *http.Response) (result SecretBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// SetStorageAccount creates or updates a new storage account. This operation requires the storage/set permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. 
-// parameters - the parameters to create a storage account. -func (client BaseClient) SetStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string, parameters StorageAccountCreateParameters) (result StorageBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.SetStorageAccount") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.ResourceID", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.ActiveKeyName", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.AutoRegenerateKey", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "SetStorageAccount", err.Error()) - } - - req, err := client.SetStorageAccountPreparer(ctx, vaultBaseURL, storageAccountName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetStorageAccount", nil, "Failure preparing request") - return - } - - resp, err := client.SetStorageAccountSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetStorageAccount", resp, "Failure sending request") - return - } - - result, err = client.SetStorageAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetStorageAccount", resp, "Failure responding to request") - return - } - - return -} - -// SetStorageAccountPreparer prepares the SetStorageAccount request. -func (client BaseClient) SetStorageAccountPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, parameters StorageAccountCreateParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/storage/{storage-account-name}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// SetStorageAccountSender sends the SetStorageAccount request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) SetStorageAccountSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// SetStorageAccountResponder handles the response to the SetStorageAccount request. The method always -// closes the http.Response Body. 
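SetSecret above is a PUT to /secrets/{secret-name} whose body must carry a non-nil Value and whose name must match ^[0-9a-zA-Z-]+$. A hedged sketch of a caller, continuing the hypothetical package from the previous example (the *string type of Value is assumed, not shown in this diff):

// setSecret stores (or creates a new version of) a secret value.
func setSecret(ctx context.Context, client keyvault.BaseClient, vaultURL, name, value string) (keyvault.SecretBundle, error) {
	// name must match ^[0-9a-zA-Z-]+$ or SetSecret rejects it before sending anything.
	params := keyvault.SecretSetParameters{Value: &value} // Value is required (nil fails validation)
	return client.SetSecret(ctx, vaultURL, name, params)
}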
-func (client BaseClient) SetStorageAccountResponder(resp *http.Response) (result StorageBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Sign the SIGN operation is applicable to asymmetric and symmetric keys stored in Azure Key Vault since this -// operation uses the private portion of the key. This operation requires the keys/sign permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key. -// keyVersion - the version of the key. -// parameters - the parameters for the signing operation. -func (client BaseClient) Sign(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeySignParameters) (result KeyOperationResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.Sign") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Value", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "Sign", err.Error()) - } - - req, err := client.SignPreparer(ctx, vaultBaseURL, keyName, keyVersion, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Sign", nil, "Failure preparing request") - return - } - - resp, err := client.SignSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Sign", resp, "Failure sending request") - return - } - - result, err = client.SignResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Sign", resp, "Failure responding to request") - return - } - - return -} - -// SignPreparer prepares the Sign request. -func (client BaseClient) SignPreparer(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeySignParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - "key-version": autorest.Encode("path", keyVersion), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}/{key-version}/sign", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// SignSender sends the Sign request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) SignSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// SignResponder handles the response to the Sign request. 
The method always -// closes the http.Response Body. -func (client BaseClient) SignResponder(resp *http.Response) (result KeyOperationResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// UnwrapKey the UNWRAP operation supports decryption of a symmetric key using the target key encryption key. This -// operation is the reverse of the WRAP operation. The UNWRAP operation applies to asymmetric and symmetric keys stored -// in Azure Key Vault since it uses the private portion of the key. This operation requires the keys/unwrapKey -// permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key. -// keyVersion - the version of the key. -// parameters - the parameters for the key operation. -func (client BaseClient) UnwrapKey(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters) (result KeyOperationResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UnwrapKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Value", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "UnwrapKey", err.Error()) - } - - req, err := client.UnwrapKeyPreparer(ctx, vaultBaseURL, keyName, keyVersion, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UnwrapKey", nil, "Failure preparing request") - return - } - - resp, err := client.UnwrapKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UnwrapKey", resp, "Failure sending request") - return - } - - result, err = client.UnwrapKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UnwrapKey", resp, "Failure responding to request") - return - } - - return -} - -// UnwrapKeyPreparer prepares the UnwrapKey request. -func (client BaseClient) UnwrapKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - "key-version": autorest.Encode("path", keyVersion), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}/{key-version}/unwrapkey", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UnwrapKeySender sends the UnwrapKey request. The method will close the -// http.Response Body if it receives an error. 
-func (client BaseClient) UnwrapKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// UnwrapKeyResponder handles the response to the UnwrapKey request. The method always -// closes the http.Response Body. -func (client BaseClient) UnwrapKeyResponder(resp *http.Response) (result KeyOperationResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// UpdateCertificate the UpdateCertificate operation applies the specified update on the given certificate; the only -// elements updated are the certificate's attributes. This operation requires the certificates/update permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate in the given key vault. -// certificateVersion - the version of the certificate. -// parameters - the parameters for certificate update. -func (client BaseClient) UpdateCertificate(ctx context.Context, vaultBaseURL string, certificateName string, certificateVersion string, parameters CertificateUpdateParameters) (result CertificateBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateCertificate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdateCertificatePreparer(ctx, vaultBaseURL, certificateName, certificateVersion, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificate", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateCertificateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificate", resp, "Failure sending request") - return - } - - result, err = client.UpdateCertificateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificate", resp, "Failure responding to request") - return - } - - return -} - -// UpdateCertificatePreparer prepares the UpdateCertificate request. 
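Every operation in this generated client is split into three exported stages: a Preparer that builds the *http.Request, a Sender that executes it with the client's retry policy, and a Responder that checks for 200, unmarshals the JSON body and closes it; the wrapper method simply chains the three. Because the stages are exported, a caller can also drive them individually, for example to inspect or decorate the request before it is sent. A sketch using the UnwrapKey trio above, in the same hypothetical package:

func unwrapKeyManually(ctx context.Context, client keyvault.BaseClient, vaultURL, keyName, keyVersion string, params keyvault.KeyOperationsParameters) (keyvault.KeyOperationResult, error) {
	var result keyvault.KeyOperationResult

	// Stage 1: build the POST /keys/{key-name}/{key-version}/unwrapkey request.
	req, err := client.UnwrapKeyPreparer(ctx, vaultURL, keyName, keyVersion, params)
	if err != nil {
		return result, err
	}
	// (The request could be inspected or decorated here before it is sent.)

	// Stage 2: send it using the client's retry configuration.
	resp, err := client.UnwrapKeySender(req)
	if err != nil {
		return result, err
	}

	// Stage 3: check the status code, unmarshal the JSON body, and close it.
	return client.UnwrapKeyResponder(resp)
}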
-func (client BaseClient) UpdateCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string, certificateVersion string, parameters CertificateUpdateParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - "certificate-version": autorest.Encode("path", certificateVersion), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/{certificate-version}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateCertificateSender sends the UpdateCertificate request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) UpdateCertificateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// UpdateCertificateResponder handles the response to the UpdateCertificate request. The method always -// closes the http.Response Body. -func (client BaseClient) UpdateCertificateResponder(resp *http.Response) (result CertificateBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// UpdateCertificateIssuer the UpdateCertificateIssuer operation performs an update on the specified certificate issuer -// entity. This operation requires the certificates/setissuers permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// issuerName - the name of the issuer. -// parameter - certificate issuer update parameter. -func (client BaseClient) UpdateCertificateIssuer(ctx context.Context, vaultBaseURL string, issuerName string, parameter CertificateIssuerUpdateParameters) (result IssuerBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateCertificateIssuer") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdateCertificateIssuerPreparer(ctx, vaultBaseURL, issuerName, parameter) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificateIssuer", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateCertificateIssuerSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificateIssuer", resp, "Failure sending request") - return - } - - result, err = client.UpdateCertificateIssuerResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificateIssuer", resp, "Failure responding to request") - return - } - - return -} - -// UpdateCertificateIssuerPreparer prepares the UpdateCertificateIssuer request. 
-func (client BaseClient) UpdateCertificateIssuerPreparer(ctx context.Context, vaultBaseURL string, issuerName string, parameter CertificateIssuerUpdateParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "issuer-name": autorest.Encode("path", issuerName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/issuers/{issuer-name}", pathParameters), - autorest.WithJSON(parameter), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateCertificateIssuerSender sends the UpdateCertificateIssuer request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) UpdateCertificateIssuerSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// UpdateCertificateIssuerResponder handles the response to the UpdateCertificateIssuer request. The method always -// closes the http.Response Body. -func (client BaseClient) UpdateCertificateIssuerResponder(resp *http.Response) (result IssuerBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// UpdateCertificateOperation updates a certificate creation operation that is already in progress. This operation -// requires the certificates/update permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate. -// certificateOperation - the certificate operation response. -func (client BaseClient) UpdateCertificateOperation(ctx context.Context, vaultBaseURL string, certificateName string, certificateOperation CertificateOperationUpdateParameter) (result CertificateOperation, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateCertificateOperation") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdateCertificateOperationPreparer(ctx, vaultBaseURL, certificateName, certificateOperation) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificateOperation", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateCertificateOperationSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificateOperation", resp, "Failure sending request") - return - } - - result, err = client.UpdateCertificateOperationResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificateOperation", resp, "Failure responding to request") - return - } - - return -} - -// UpdateCertificateOperationPreparer prepares the UpdateCertificateOperation request. 
-func (client BaseClient) UpdateCertificateOperationPreparer(ctx context.Context, vaultBaseURL string, certificateName string, certificateOperation CertificateOperationUpdateParameter) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/pending", pathParameters), - autorest.WithJSON(certificateOperation), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateCertificateOperationSender sends the UpdateCertificateOperation request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) UpdateCertificateOperationSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// UpdateCertificateOperationResponder handles the response to the UpdateCertificateOperation request. The method always -// closes the http.Response Body. -func (client BaseClient) UpdateCertificateOperationResponder(resp *http.Response) (result CertificateOperation, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// UpdateCertificatePolicy set specified members in the certificate policy. Leave others as null. This operation -// requires the certificates/update permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate in the given vault. -// certificatePolicy - the policy for the certificate. -func (client BaseClient) UpdateCertificatePolicy(ctx context.Context, vaultBaseURL string, certificateName string, certificatePolicy CertificatePolicy) (result CertificatePolicy, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateCertificatePolicy") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdateCertificatePolicyPreparer(ctx, vaultBaseURL, certificateName, certificatePolicy) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificatePolicy", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateCertificatePolicySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificatePolicy", resp, "Failure sending request") - return - } - - result, err = client.UpdateCertificatePolicyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificatePolicy", resp, "Failure responding to request") - return - } - - return -} - -// UpdateCertificatePolicyPreparer prepares the UpdateCertificatePolicy request. 
-func (client BaseClient) UpdateCertificatePolicyPreparer(ctx context.Context, vaultBaseURL string, certificateName string, certificatePolicy CertificatePolicy) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - certificatePolicy.ID = nil - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/policy", pathParameters), - autorest.WithJSON(certificatePolicy), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateCertificatePolicySender sends the UpdateCertificatePolicy request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) UpdateCertificatePolicySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// UpdateCertificatePolicyResponder handles the response to the UpdateCertificatePolicy request. The method always -// closes the http.Response Body. -func (client BaseClient) UpdateCertificatePolicyResponder(resp *http.Response) (result CertificatePolicy, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// UpdateKey in order to perform this operation, the key must already exist in the Key Vault. Note: The cryptographic -// material of a key itself cannot be changed. This operation requires the keys/update permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of key to update. -// keyVersion - the version of the key to update. -// parameters - the parameters of the key to update. -func (client BaseClient) UpdateKey(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyUpdateParameters) (result KeyBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdateKeyPreparer(ctx, vaultBaseURL, keyName, keyVersion, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateKey", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateKey", resp, "Failure sending request") - return - } - - result, err = client.UpdateKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateKey", resp, "Failure responding to request") - return - } - - return -} - -// UpdateKeyPreparer prepares the UpdateKey request. 
-func (client BaseClient) UpdateKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyUpdateParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - "key-version": autorest.Encode("path", keyVersion), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}/{key-version}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateKeySender sends the UpdateKey request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) UpdateKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// UpdateKeyResponder handles the response to the UpdateKey request. The method always -// closes the http.Response Body. -func (client BaseClient) UpdateKeyResponder(resp *http.Response) (result KeyBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// UpdateSasDefinition updates the specified attributes associated with the given SAS definition. This operation -// requires the storage/setsas permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. -// sasDefinitionName - the name of the SAS definition. -// parameters - the parameters to update a SAS definition. 
-func (client BaseClient) UpdateSasDefinition(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string, parameters SasDefinitionUpdateParameters) (result SasDefinitionBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateSasDefinition") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, - {TargetValue: sasDefinitionName, - Constraints: []validation.Constraint{{Target: "sasDefinitionName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "UpdateSasDefinition", err.Error()) - } - - req, err := client.UpdateSasDefinitionPreparer(ctx, vaultBaseURL, storageAccountName, sasDefinitionName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateSasDefinition", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateSasDefinitionSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateSasDefinition", resp, "Failure sending request") - return - } - - result, err = client.UpdateSasDefinitionResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateSasDefinition", resp, "Failure responding to request") - return - } - - return -} - -// UpdateSasDefinitionPreparer prepares the UpdateSasDefinition request. -func (client BaseClient) UpdateSasDefinitionPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string, parameters SasDefinitionUpdateParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "sas-definition-name": autorest.Encode("path", sasDefinitionName), - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/storage/{storage-account-name}/sas/{sas-definition-name}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateSasDefinitionSender sends the UpdateSasDefinition request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) UpdateSasDefinitionSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// UpdateSasDefinitionResponder handles the response to the UpdateSasDefinition request. The method always -// closes the http.Response Body. 
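Like SetSasDefinition earlier, UpdateSasDefinition validates its inputs locally before a request is ever built: storageAccountName and sasDefinitionName must match ^[0-9a-zA-Z]+$, and a violation is returned as a validation error tagged "keyvault.BaseClient"/"UpdateSasDefinition" with no HTTP traffic. A small illustration with hypothetical values, same package and client as above:

func demoNameValidation(ctx context.Context, client keyvault.BaseClient, vaultURL string) {
	// "bad name!" fails the ^[0-9a-zA-Z]+$ pattern, so no request is built or sent;
	// the error comes from the validation.NewError call at the top of UpdateSasDefinition.
	_, err := client.UpdateSasDefinition(ctx, vaultURL, "bad name!", "sasdef1",
		keyvault.SasDefinitionUpdateParameters{})
	fmt.Println(err)
}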
-func (client BaseClient) UpdateSasDefinitionResponder(resp *http.Response) (result SasDefinitionBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// UpdateSecret the UPDATE operation changes specified attributes of an existing stored secret. Attributes that are not -// specified in the request are left unchanged. The value of a secret itself cannot be changed. This operation requires -// the secrets/set permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// secretName - the name of the secret. -// secretVersion - the version of the secret. -// parameters - the parameters for update secret operation. -func (client BaseClient) UpdateSecret(ctx context.Context, vaultBaseURL string, secretName string, secretVersion string, parameters SecretUpdateParameters) (result SecretBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateSecret") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdateSecretPreparer(ctx, vaultBaseURL, secretName, secretVersion, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateSecret", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateSecretSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateSecret", resp, "Failure sending request") - return - } - - result, err = client.UpdateSecretResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateSecret", resp, "Failure responding to request") - return - } - - return -} - -// UpdateSecretPreparer prepares the UpdateSecret request. -func (client BaseClient) UpdateSecretPreparer(ctx context.Context, vaultBaseURL string, secretName string, secretVersion string, parameters SecretUpdateParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "secret-name": autorest.Encode("path", secretName), - "secret-version": autorest.Encode("path", secretVersion), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/secrets/{secret-name}/{secret-version}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateSecretSender sends the UpdateSecret request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) UpdateSecretSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// UpdateSecretResponder handles the response to the UpdateSecret request. The method always -// closes the http.Response Body. 
-func (client BaseClient) UpdateSecretResponder(resp *http.Response) (result SecretBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// UpdateStorageAccount updates the specified attributes associated with the given storage account. This operation -// requires the storage/set/update permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. -// parameters - the parameters to update a storage account. -func (client BaseClient) UpdateStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string, parameters StorageAccountUpdateParameters) (result StorageBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateStorageAccount") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "UpdateStorageAccount", err.Error()) - } - - req, err := client.UpdateStorageAccountPreparer(ctx, vaultBaseURL, storageAccountName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateStorageAccount", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateStorageAccountSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateStorageAccount", resp, "Failure sending request") - return - } - - result, err = client.UpdateStorageAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateStorageAccount", resp, "Failure responding to request") - return - } - - return -} - -// UpdateStorageAccountPreparer prepares the UpdateStorageAccount request. -func (client BaseClient) UpdateStorageAccountPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, parameters StorageAccountUpdateParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/storage/{storage-account-name}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateStorageAccountSender sends the UpdateStorageAccount request. The method will close the -// http.Response Body if it receives an error. 
-func (client BaseClient) UpdateStorageAccountSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// UpdateStorageAccountResponder handles the response to the UpdateStorageAccount request. The method always -// closes the http.Response Body. -func (client BaseClient) UpdateStorageAccountResponder(resp *http.Response) (result StorageBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Verify the VERIFY operation is applicable to symmetric keys stored in Azure Key Vault. VERIFY is not strictly -// necessary for asymmetric keys stored in Azure Key Vault since signature verification can be performed using the -// public portion of the key but this operation is supported as a convenience for callers that only have a -// key-reference and not the public portion of the key. This operation requires the keys/verify permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key. -// keyVersion - the version of the key. -// parameters - the parameters for verify operations. -func (client BaseClient) Verify(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyVerifyParameters) (result KeyVerifyResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.Verify") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Digest", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.Signature", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "Verify", err.Error()) - } - - req, err := client.VerifyPreparer(ctx, vaultBaseURL, keyName, keyVersion, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Verify", nil, "Failure preparing request") - return - } - - resp, err := client.VerifySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Verify", resp, "Failure sending request") - return - } - - result, err = client.VerifyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Verify", resp, "Failure responding to request") - return - } - - return -} - -// VerifyPreparer prepares the Verify request. 
-func (client BaseClient) VerifyPreparer(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyVerifyParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - "key-version": autorest.Encode("path", keyVersion), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}/{key-version}/verify", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// VerifySender sends the Verify request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) VerifySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// VerifyResponder handles the response to the Verify request. The method always -// closes the http.Response Body. -func (client BaseClient) VerifyResponder(resp *http.Response) (result KeyVerifyResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// WrapKey the WRAP operation supports encryption of a symmetric key using a key encryption key that has previously -// been stored in an Azure Key Vault. The WRAP operation is only strictly necessary for symmetric keys stored in Azure -// Key Vault since protection with an asymmetric key can be performed using the public portion of the key. This -// operation is supported for asymmetric keys as a convenience for callers that have a key-reference but do not have -// access to the public key material. This operation requires the keys/wrapKey permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key. -// keyVersion - the version of the key. -// parameters - the parameters for wrap operation. 
-func (client BaseClient) WrapKey(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters) (result KeyOperationResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.WrapKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Value", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "WrapKey", err.Error()) - } - - req, err := client.WrapKeyPreparer(ctx, vaultBaseURL, keyName, keyVersion, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "WrapKey", nil, "Failure preparing request") - return - } - - resp, err := client.WrapKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "WrapKey", resp, "Failure sending request") - return - } - - result, err = client.WrapKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "WrapKey", resp, "Failure responding to request") - return - } - - return -} - -// WrapKeyPreparer prepares the WrapKey request. -func (client BaseClient) WrapKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - "key-version": autorest.Encode("path", keyVersion), - } - - const APIVersion = "7.1" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}/{key-version}/wrapkey", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// WrapKeySender sends the WrapKey request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) WrapKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// WrapKeyResponder handles the response to the WrapKey request. The method always -// closes the http.Response Body. 
-func (client BaseClient) WrapKeyResponder(resp *http.Response) (result KeyOperationResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/dataplane_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/dataplane_meta.json deleted file mode 100644 index 311f23dc..00000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/dataplane_meta.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "commit": "3c764635e7d442b3e74caf593029fcd440b3ef82", - "readme": "/_/azure-rest-api-specs/specification/keyvault/data-plane/readme.md", - "tag": "package-7.1", - "use": "@microsoft.azure/autorest.go@2.1.183", - "repository_url": "https://github.com/Azure/azure-rest-api-specs.git", - "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.183 --tag=package-7.1 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/keyvault/data-plane/readme.md", - "additional_properties": { - "additional_options": "--go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION" - } -} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/enums.go b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/enums.go deleted file mode 100644 index 5c4dbbce..00000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/enums.go +++ /dev/null @@ -1,231 +0,0 @@ -package keyvault - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -// ActionType enumerates the values for action type. -type ActionType string - -const ( - // AutoRenew ... - AutoRenew ActionType = "AutoRenew" - // EmailContacts ... - EmailContacts ActionType = "EmailContacts" -) - -// PossibleActionTypeValues returns an array of possible values for the ActionType const type. -func PossibleActionTypeValues() []ActionType { - return []ActionType{AutoRenew, EmailContacts} -} - -// DeletionRecoveryLevel enumerates the values for deletion recovery level. -type DeletionRecoveryLevel string - -const ( - // CustomizedRecoverable Denotes a vault state in which deletion is recoverable without the possibility for - // immediate and permanent deletion (i.e. purge when 7<= SoftDeleteRetentionInDays < 90).This level - // guarantees the recoverability of the deleted entity during the retention interval and while the - // subscription is still available. - CustomizedRecoverable DeletionRecoveryLevel = "CustomizedRecoverable" - // CustomizedRecoverableProtectedSubscription Denotes a vault and subscription state in which deletion is - // recoverable, immediate and permanent deletion (i.e. purge) is not permitted, and in which the - // subscription itself cannot be permanently canceled when 7<= SoftDeleteRetentionInDays < 90. 
This level - // guarantees the recoverability of the deleted entity during the retention interval, and also reflects the - // fact that the subscription itself cannot be cancelled. - CustomizedRecoverableProtectedSubscription DeletionRecoveryLevel = "CustomizedRecoverable+ProtectedSubscription" - // CustomizedRecoverablePurgeable Denotes a vault state in which deletion is recoverable, and which also - // permits immediate and permanent deletion (i.e. purge when 7<= SoftDeleteRetentionInDays < 90). This - // level guarantees the recoverability of the deleted entity during the retention interval, unless a Purge - // operation is requested, or the subscription is cancelled. - CustomizedRecoverablePurgeable DeletionRecoveryLevel = "CustomizedRecoverable+Purgeable" - // Purgeable Denotes a vault state in which deletion is an irreversible operation, without the possibility - // for recovery. This level corresponds to no protection being available against a Delete operation; the - // data is irretrievably lost upon accepting a Delete operation at the entity level or higher (vault, - // resource group, subscription etc.) - Purgeable DeletionRecoveryLevel = "Purgeable" - // Recoverable Denotes a vault state in which deletion is recoverable without the possibility for immediate - // and permanent deletion (i.e. purge). This level guarantees the recoverability of the deleted entity - // during the retention interval(90 days) and while the subscription is still available. System wil - // permanently delete it after 90 days, if not recovered - Recoverable DeletionRecoveryLevel = "Recoverable" - // RecoverableProtectedSubscription Denotes a vault and subscription state in which deletion is recoverable - // within retention interval (90 days), immediate and permanent deletion (i.e. purge) is not permitted, and - // in which the subscription itself cannot be permanently canceled. System wil permanently delete it after - // 90 days, if not recovered - RecoverableProtectedSubscription DeletionRecoveryLevel = "Recoverable+ProtectedSubscription" - // RecoverablePurgeable Denotes a vault state in which deletion is recoverable, and which also permits - // immediate and permanent deletion (i.e. purge). This level guarantees the recoverability of the deleted - // entity during the retention interval (90 days), unless a Purge operation is requested, or the - // subscription is cancelled. System wil permanently delete it after 90 days, if not recovered - RecoverablePurgeable DeletionRecoveryLevel = "Recoverable+Purgeable" -) - -// PossibleDeletionRecoveryLevelValues returns an array of possible values for the DeletionRecoveryLevel const type. -func PossibleDeletionRecoveryLevelValues() []DeletionRecoveryLevel { - return []DeletionRecoveryLevel{CustomizedRecoverable, CustomizedRecoverableProtectedSubscription, CustomizedRecoverablePurgeable, Purgeable, Recoverable, RecoverableProtectedSubscription, RecoverablePurgeable} -} - -// JSONWebKeyCurveName enumerates the values for json web key curve name. -type JSONWebKeyCurveName string - -const ( - // P256 ... - P256 JSONWebKeyCurveName = "P-256" - // P256K ... - P256K JSONWebKeyCurveName = "P-256K" - // P384 ... - P384 JSONWebKeyCurveName = "P-384" - // P521 ... - P521 JSONWebKeyCurveName = "P-521" -) - -// PossibleJSONWebKeyCurveNameValues returns an array of possible values for the JSONWebKeyCurveName const type. 
-func PossibleJSONWebKeyCurveNameValues() []JSONWebKeyCurveName { - return []JSONWebKeyCurveName{P256, P256K, P384, P521} -} - -// JSONWebKeyEncryptionAlgorithm enumerates the values for json web key encryption algorithm. -type JSONWebKeyEncryptionAlgorithm string - -const ( - // RSA15 ... - RSA15 JSONWebKeyEncryptionAlgorithm = "RSA1_5" - // RSAOAEP ... - RSAOAEP JSONWebKeyEncryptionAlgorithm = "RSA-OAEP" - // RSAOAEP256 ... - RSAOAEP256 JSONWebKeyEncryptionAlgorithm = "RSA-OAEP-256" -) - -// PossibleJSONWebKeyEncryptionAlgorithmValues returns an array of possible values for the JSONWebKeyEncryptionAlgorithm const type. -func PossibleJSONWebKeyEncryptionAlgorithmValues() []JSONWebKeyEncryptionAlgorithm { - return []JSONWebKeyEncryptionAlgorithm{RSA15, RSAOAEP, RSAOAEP256} -} - -// JSONWebKeyOperation enumerates the values for json web key operation. -type JSONWebKeyOperation string - -const ( - // Decrypt ... - Decrypt JSONWebKeyOperation = "decrypt" - // Encrypt ... - Encrypt JSONWebKeyOperation = "encrypt" - // Import ... - Import JSONWebKeyOperation = "import" - // Sign ... - Sign JSONWebKeyOperation = "sign" - // UnwrapKey ... - UnwrapKey JSONWebKeyOperation = "unwrapKey" - // Verify ... - Verify JSONWebKeyOperation = "verify" - // WrapKey ... - WrapKey JSONWebKeyOperation = "wrapKey" -) - -// PossibleJSONWebKeyOperationValues returns an array of possible values for the JSONWebKeyOperation const type. -func PossibleJSONWebKeyOperationValues() []JSONWebKeyOperation { - return []JSONWebKeyOperation{Decrypt, Encrypt, Import, Sign, UnwrapKey, Verify, WrapKey} -} - -// JSONWebKeySignatureAlgorithm enumerates the values for json web key signature algorithm. -type JSONWebKeySignatureAlgorithm string - -const ( - // ES256 ECDSA using P-256 and SHA-256, as described in https://tools.ietf.org/html/rfc7518. - ES256 JSONWebKeySignatureAlgorithm = "ES256" - // ES256K ECDSA using P-256K and SHA-256, as described in https://tools.ietf.org/html/rfc7518 - ES256K JSONWebKeySignatureAlgorithm = "ES256K" - // ES384 ECDSA using P-384 and SHA-384, as described in https://tools.ietf.org/html/rfc7518 - ES384 JSONWebKeySignatureAlgorithm = "ES384" - // ES512 ECDSA using P-521 and SHA-512, as described in https://tools.ietf.org/html/rfc7518 - ES512 JSONWebKeySignatureAlgorithm = "ES512" - // PS256 RSASSA-PSS using SHA-256 and MGF1 with SHA-256, as described in - // https://tools.ietf.org/html/rfc7518 - PS256 JSONWebKeySignatureAlgorithm = "PS256" - // PS384 RSASSA-PSS using SHA-384 and MGF1 with SHA-384, as described in - // https://tools.ietf.org/html/rfc7518 - PS384 JSONWebKeySignatureAlgorithm = "PS384" - // PS512 RSASSA-PSS using SHA-512 and MGF1 with SHA-512, as described in - // https://tools.ietf.org/html/rfc7518 - PS512 JSONWebKeySignatureAlgorithm = "PS512" - // RS256 RSASSA-PKCS1-v1_5 using SHA-256, as described in https://tools.ietf.org/html/rfc7518 - RS256 JSONWebKeySignatureAlgorithm = "RS256" - // RS384 RSASSA-PKCS1-v1_5 using SHA-384, as described in https://tools.ietf.org/html/rfc7518 - RS384 JSONWebKeySignatureAlgorithm = "RS384" - // RS512 RSASSA-PKCS1-v1_5 using SHA-512, as described in https://tools.ietf.org/html/rfc7518 - RS512 JSONWebKeySignatureAlgorithm = "RS512" - // RSNULL Reserved - RSNULL JSONWebKeySignatureAlgorithm = "RSNULL" -) - -// PossibleJSONWebKeySignatureAlgorithmValues returns an array of possible values for the JSONWebKeySignatureAlgorithm const type. 
-func PossibleJSONWebKeySignatureAlgorithmValues() []JSONWebKeySignatureAlgorithm { - return []JSONWebKeySignatureAlgorithm{ES256, ES256K, ES384, ES512, PS256, PS384, PS512, RS256, RS384, RS512, RSNULL} -} - -// JSONWebKeyType enumerates the values for json web key type. -type JSONWebKeyType string - -const ( - // EC ... - EC JSONWebKeyType = "EC" - // ECHSM ... - ECHSM JSONWebKeyType = "EC-HSM" - // Oct ... - Oct JSONWebKeyType = "oct" - // RSA ... - RSA JSONWebKeyType = "RSA" - // RSAHSM ... - RSAHSM JSONWebKeyType = "RSA-HSM" -) - -// PossibleJSONWebKeyTypeValues returns an array of possible values for the JSONWebKeyType const type. -func PossibleJSONWebKeyTypeValues() []JSONWebKeyType { - return []JSONWebKeyType{EC, ECHSM, Oct, RSA, RSAHSM} -} - -// KeyUsageType enumerates the values for key usage type. -type KeyUsageType string - -const ( - // CRLSign ... - CRLSign KeyUsageType = "cRLSign" - // DataEncipherment ... - DataEncipherment KeyUsageType = "dataEncipherment" - // DecipherOnly ... - DecipherOnly KeyUsageType = "decipherOnly" - // DigitalSignature ... - DigitalSignature KeyUsageType = "digitalSignature" - // EncipherOnly ... - EncipherOnly KeyUsageType = "encipherOnly" - // KeyAgreement ... - KeyAgreement KeyUsageType = "keyAgreement" - // KeyCertSign ... - KeyCertSign KeyUsageType = "keyCertSign" - // KeyEncipherment ... - KeyEncipherment KeyUsageType = "keyEncipherment" - // NonRepudiation ... - NonRepudiation KeyUsageType = "nonRepudiation" -) - -// PossibleKeyUsageTypeValues returns an array of possible values for the KeyUsageType const type. -func PossibleKeyUsageTypeValues() []KeyUsageType { - return []KeyUsageType{CRLSign, DataEncipherment, DecipherOnly, DigitalSignature, EncipherOnly, KeyAgreement, KeyCertSign, KeyEncipherment, NonRepudiation} -} - -// SasTokenType enumerates the values for sas token type. -type SasTokenType string - -const ( - // Account ... - Account SasTokenType = "account" - // Service ... - Service SasTokenType = "service" -) - -// PossibleSasTokenTypeValues returns an array of possible values for the SasTokenType const type. -func PossibleSasTokenTypeValues() []SasTokenType { - return []SasTokenType{Account, Service} -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/models.go deleted file mode 100644 index 8ebcb0f2..00000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/models.go +++ /dev/null @@ -1,3611 +0,0 @@ -package keyvault - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "encoding/json" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/date" - "github.com/Azure/go-autorest/autorest/to" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// The package's fully qualified name. -const fqdn = "github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault" - -// Action the action that will be executed. -type Action struct { - // ActionType - The type of the action. 
Possible values include: 'EmailContacts', 'AutoRenew' - ActionType ActionType `json:"action_type,omitempty"` -} - -// AdministratorDetails details of the organization administrator of the certificate issuer. -type AdministratorDetails struct { - // FirstName - First name. - FirstName *string `json:"first_name,omitempty"` - // LastName - Last name. - LastName *string `json:"last_name,omitempty"` - // EmailAddress - Email address. - EmailAddress *string `json:"email,omitempty"` - // Phone - Phone number. - Phone *string `json:"phone,omitempty"` -} - -// Attributes the object attributes managed by the KeyVault service. -type Attributes struct { - // Enabled - Determines whether the object is enabled. - Enabled *bool `json:"enabled,omitempty"` - // NotBefore - Not before date in UTC. - NotBefore *date.UnixTime `json:"nbf,omitempty"` - // Expires - Expiry date in UTC. - Expires *date.UnixTime `json:"exp,omitempty"` - // Created - READ-ONLY; Creation time in UTC. - Created *date.UnixTime `json:"created,omitempty"` - // Updated - READ-ONLY; Last updated time in UTC. - Updated *date.UnixTime `json:"updated,omitempty"` -} - -// MarshalJSON is the custom marshaler for Attributes. -func (a Attributes) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if a.Enabled != nil { - objectMap["enabled"] = a.Enabled - } - if a.NotBefore != nil { - objectMap["nbf"] = a.NotBefore - } - if a.Expires != nil { - objectMap["exp"] = a.Expires - } - return json.Marshal(objectMap) -} - -// BackupCertificateResult the backup certificate result, containing the backup blob. -type BackupCertificateResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; The backup blob containing the backed up certificate. (a URL-encoded base64 string) - Value *string `json:"value,omitempty"` -} - -// MarshalJSON is the custom marshaler for BackupCertificateResult. -func (bcr BackupCertificateResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// BackupKeyResult the backup key result, containing the backup blob. -type BackupKeyResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; The backup blob containing the backed up key. (a URL-encoded base64 string) - Value *string `json:"value,omitempty"` -} - -// MarshalJSON is the custom marshaler for BackupKeyResult. -func (bkr BackupKeyResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// BackupSecretResult the backup secret result, containing the backup blob. -type BackupSecretResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; The backup blob containing the backed up secret. (a URL-encoded base64 string) - Value *string `json:"value,omitempty"` -} - -// MarshalJSON is the custom marshaler for BackupSecretResult. -func (bsr BackupSecretResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// BackupStorageResult the backup storage result, containing the backup blob. -type BackupStorageResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; The backup blob containing the backed up storage account. (a URL-encoded base64 string) - Value *string `json:"value,omitempty"` -} - -// MarshalJSON is the custom marshaler for BackupStorageResult. 
-func (bsr BackupStorageResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// CertificateAttributes the certificate management attributes. -type CertificateAttributes struct { - // RecoverableDays - READ-ONLY; softDelete data retention days. Value should be >=7 and <=90 when softDelete enabled, otherwise 0. - RecoverableDays *int32 `json:"recoverableDays,omitempty"` - // RecoveryLevel - READ-ONLY; Reflects the deletion recovery level currently in effect for certificates in the current vault. If it contains 'Purgeable', the certificate can be permanently deleted by a privileged user; otherwise, only the system can purge the certificate, at the end of the retention interval. Possible values include: 'Purgeable', 'RecoverablePurgeable', 'Recoverable', 'RecoverableProtectedSubscription', 'CustomizedRecoverablePurgeable', 'CustomizedRecoverable', 'CustomizedRecoverableProtectedSubscription' - RecoveryLevel DeletionRecoveryLevel `json:"recoveryLevel,omitempty"` - // Enabled - Determines whether the object is enabled. - Enabled *bool `json:"enabled,omitempty"` - // NotBefore - Not before date in UTC. - NotBefore *date.UnixTime `json:"nbf,omitempty"` - // Expires - Expiry date in UTC. - Expires *date.UnixTime `json:"exp,omitempty"` - // Created - READ-ONLY; Creation time in UTC. - Created *date.UnixTime `json:"created,omitempty"` - // Updated - READ-ONLY; Last updated time in UTC. - Updated *date.UnixTime `json:"updated,omitempty"` -} - -// MarshalJSON is the custom marshaler for CertificateAttributes. -func (ca CertificateAttributes) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ca.Enabled != nil { - objectMap["enabled"] = ca.Enabled - } - if ca.NotBefore != nil { - objectMap["nbf"] = ca.NotBefore - } - if ca.Expires != nil { - objectMap["exp"] = ca.Expires - } - return json.Marshal(objectMap) -} - -// CertificateBundle a certificate bundle consists of a certificate (X509) plus its attributes. -type CertificateBundle struct { - autorest.Response `json:"-"` - // ID - READ-ONLY; The certificate id. - ID *string `json:"id,omitempty"` - // Kid - READ-ONLY; The key id. - Kid *string `json:"kid,omitempty"` - // Sid - READ-ONLY; The secret id. - Sid *string `json:"sid,omitempty"` - // X509Thumbprint - READ-ONLY; Thumbprint of the certificate. (a URL-encoded base64 string) - X509Thumbprint *string `json:"x5t,omitempty"` - // Policy - READ-ONLY; The management policy. - Policy *CertificatePolicy `json:"policy,omitempty"` - // Cer - CER contents of x509 certificate. - Cer *[]byte `json:"cer,omitempty"` - // ContentType - The content type of the secret. - ContentType *string `json:"contentType,omitempty"` - // Attributes - The certificate attributes. - Attributes *CertificateAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for CertificateBundle. -func (cb CertificateBundle) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if cb.Cer != nil { - objectMap["cer"] = cb.Cer - } - if cb.ContentType != nil { - objectMap["contentType"] = cb.ContentType - } - if cb.Attributes != nil { - objectMap["attributes"] = cb.Attributes - } - if cb.Tags != nil { - objectMap["tags"] = cb.Tags - } - return json.Marshal(objectMap) -} - -// CertificateCreateParameters the certificate create parameters. 
-type CertificateCreateParameters struct { - // CertificatePolicy - The management policy for the certificate. - CertificatePolicy *CertificatePolicy `json:"policy,omitempty"` - // CertificateAttributes - The attributes of the certificate (optional). - CertificateAttributes *CertificateAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for CertificateCreateParameters. -func (ccp CertificateCreateParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ccp.CertificatePolicy != nil { - objectMap["policy"] = ccp.CertificatePolicy - } - if ccp.CertificateAttributes != nil { - objectMap["attributes"] = ccp.CertificateAttributes - } - if ccp.Tags != nil { - objectMap["tags"] = ccp.Tags - } - return json.Marshal(objectMap) -} - -// CertificateImportParameters the certificate import parameters. -type CertificateImportParameters struct { - // Base64EncodedCertificate - A PEM file or a base64-encoded PFX file. PEM files need to contain the private key. - Base64EncodedCertificate *string `json:"value,omitempty"` - // Password - If the private key in base64EncodedCertificate is encrypted, the password used for encryption. - Password *string `json:"pwd,omitempty"` - // CertificatePolicy - The management policy for the certificate. - CertificatePolicy *CertificatePolicy `json:"policy,omitempty"` - // CertificateAttributes - The attributes of the certificate (optional). - CertificateAttributes *CertificateAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for CertificateImportParameters. -func (cip CertificateImportParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if cip.Base64EncodedCertificate != nil { - objectMap["value"] = cip.Base64EncodedCertificate - } - if cip.Password != nil { - objectMap["pwd"] = cip.Password - } - if cip.CertificatePolicy != nil { - objectMap["policy"] = cip.CertificatePolicy - } - if cip.CertificateAttributes != nil { - objectMap["attributes"] = cip.CertificateAttributes - } - if cip.Tags != nil { - objectMap["tags"] = cip.Tags - } - return json.Marshal(objectMap) -} - -// CertificateIssuerItem the certificate issuer item containing certificate issuer metadata. -type CertificateIssuerItem struct { - // ID - Certificate Identifier. - ID *string `json:"id,omitempty"` - // Provider - The issuer provider. - Provider *string `json:"provider,omitempty"` -} - -// CertificateIssuerListResult the certificate issuer list result. -type CertificateIssuerListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of certificate issuers in the key vault along with a link to the next page of certificate issuers. - Value *[]CertificateIssuerItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of certificate issuers. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for CertificateIssuerListResult. -func (cilr CertificateIssuerListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// CertificateIssuerListResultIterator provides access to a complete listing of CertificateIssuerItem -// values. 
-type CertificateIssuerListResultIterator struct { - i int - page CertificateIssuerListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *CertificateIssuerListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/CertificateIssuerListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *CertificateIssuerListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter CertificateIssuerListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter CertificateIssuerListResultIterator) Response() CertificateIssuerListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter CertificateIssuerListResultIterator) Value() CertificateIssuerItem { - if !iter.page.NotDone() { - return CertificateIssuerItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the CertificateIssuerListResultIterator type. -func NewCertificateIssuerListResultIterator(page CertificateIssuerListResultPage) CertificateIssuerListResultIterator { - return CertificateIssuerListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (cilr CertificateIssuerListResult) IsEmpty() bool { - return cilr.Value == nil || len(*cilr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (cilr CertificateIssuerListResult) hasNextLink() bool { - return cilr.NextLink != nil && len(*cilr.NextLink) != 0 -} - -// certificateIssuerListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (cilr CertificateIssuerListResult) certificateIssuerListResultPreparer(ctx context.Context) (*http.Request, error) { - if !cilr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(cilr.NextLink))) -} - -// CertificateIssuerListResultPage contains a page of CertificateIssuerItem values. -type CertificateIssuerListResultPage struct { - fn func(context.Context, CertificateIssuerListResult) (CertificateIssuerListResult, error) - cilr CertificateIssuerListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. 
-func (page *CertificateIssuerListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/CertificateIssuerListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.cilr) - if err != nil { - return err - } - page.cilr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *CertificateIssuerListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page CertificateIssuerListResultPage) NotDone() bool { - return !page.cilr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page CertificateIssuerListResultPage) Response() CertificateIssuerListResult { - return page.cilr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page CertificateIssuerListResultPage) Values() []CertificateIssuerItem { - if page.cilr.IsEmpty() { - return nil - } - return *page.cilr.Value -} - -// Creates a new instance of the CertificateIssuerListResultPage type. -func NewCertificateIssuerListResultPage(cur CertificateIssuerListResult, getNextPage func(context.Context, CertificateIssuerListResult) (CertificateIssuerListResult, error)) CertificateIssuerListResultPage { - return CertificateIssuerListResultPage{ - fn: getNextPage, - cilr: cur, - } -} - -// CertificateIssuerSetParameters the certificate issuer set parameters. -type CertificateIssuerSetParameters struct { - // Provider - The issuer provider. - Provider *string `json:"provider,omitempty"` - // Credentials - The credentials to be used for the issuer. - Credentials *IssuerCredentials `json:"credentials,omitempty"` - // OrganizationDetails - Details of the organization as provided to the issuer. - OrganizationDetails *OrganizationDetails `json:"org_details,omitempty"` - // Attributes - Attributes of the issuer object. - Attributes *IssuerAttributes `json:"attributes,omitempty"` -} - -// CertificateIssuerUpdateParameters the certificate issuer update parameters. -type CertificateIssuerUpdateParameters struct { - // Provider - The issuer provider. - Provider *string `json:"provider,omitempty"` - // Credentials - The credentials to be used for the issuer. - Credentials *IssuerCredentials `json:"credentials,omitempty"` - // OrganizationDetails - Details of the organization as provided to the issuer. - OrganizationDetails *OrganizationDetails `json:"org_details,omitempty"` - // Attributes - Attributes of the issuer object. - Attributes *IssuerAttributes `json:"attributes,omitempty"` -} - -// CertificateItem the certificate item containing certificate metadata. -type CertificateItem struct { - // ID - Certificate identifier. - ID *string `json:"id,omitempty"` - // Attributes - The certificate management attributes. - Attributes *CertificateAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // X509Thumbprint - Thumbprint of the certificate. 
(a URL-encoded base64 string) - X509Thumbprint *string `json:"x5t,omitempty"` -} - -// MarshalJSON is the custom marshaler for CertificateItem. -func (ci CertificateItem) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ci.ID != nil { - objectMap["id"] = ci.ID - } - if ci.Attributes != nil { - objectMap["attributes"] = ci.Attributes - } - if ci.Tags != nil { - objectMap["tags"] = ci.Tags - } - if ci.X509Thumbprint != nil { - objectMap["x5t"] = ci.X509Thumbprint - } - return json.Marshal(objectMap) -} - -// CertificateListResult the certificate list result. -type CertificateListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of certificates in the key vault along with a link to the next page of certificates. - Value *[]CertificateItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of certificates. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for CertificateListResult. -func (clr CertificateListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// CertificateListResultIterator provides access to a complete listing of CertificateItem values. -type CertificateListResultIterator struct { - i int - page CertificateListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *CertificateListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/CertificateListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *CertificateListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter CertificateListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter CertificateListResultIterator) Response() CertificateListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter CertificateListResultIterator) Value() CertificateItem { - if !iter.page.NotDone() { - return CertificateItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the CertificateListResultIterator type. -func NewCertificateListResultIterator(page CertificateListResultPage) CertificateListResultIterator { - return CertificateListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. 
-func (clr CertificateListResult) IsEmpty() bool { - return clr.Value == nil || len(*clr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (clr CertificateListResult) hasNextLink() bool { - return clr.NextLink != nil && len(*clr.NextLink) != 0 -} - -// certificateListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (clr CertificateListResult) certificateListResultPreparer(ctx context.Context) (*http.Request, error) { - if !clr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(clr.NextLink))) -} - -// CertificateListResultPage contains a page of CertificateItem values. -type CertificateListResultPage struct { - fn func(context.Context, CertificateListResult) (CertificateListResult, error) - clr CertificateListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *CertificateListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/CertificateListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.clr) - if err != nil { - return err - } - page.clr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *CertificateListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page CertificateListResultPage) NotDone() bool { - return !page.clr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page CertificateListResultPage) Response() CertificateListResult { - return page.clr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page CertificateListResultPage) Values() []CertificateItem { - if page.clr.IsEmpty() { - return nil - } - return *page.clr.Value -} - -// Creates a new instance of the CertificateListResultPage type. -func NewCertificateListResultPage(cur CertificateListResult, getNextPage func(context.Context, CertificateListResult) (CertificateListResult, error)) CertificateListResultPage { - return CertificateListResultPage{ - fn: getNextPage, - clr: cur, - } -} - -// CertificateMergeParameters the certificate merge parameters -type CertificateMergeParameters struct { - // X509Certificates - The certificate or the certificate chain to merge. - X509Certificates *[][]byte `json:"x5c,omitempty"` - // CertificateAttributes - The attributes of the certificate (optional). - CertificateAttributes *CertificateAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for CertificateMergeParameters. 
-func (cmp CertificateMergeParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if cmp.X509Certificates != nil { - objectMap["x5c"] = cmp.X509Certificates - } - if cmp.CertificateAttributes != nil { - objectMap["attributes"] = cmp.CertificateAttributes - } - if cmp.Tags != nil { - objectMap["tags"] = cmp.Tags - } - return json.Marshal(objectMap) -} - -// CertificateOperation a certificate operation is returned in case of asynchronous requests. -type CertificateOperation struct { - autorest.Response `json:"-"` - // ID - READ-ONLY; The certificate id. - ID *string `json:"id,omitempty"` - // IssuerParameters - Parameters for the issuer of the X509 component of a certificate. - IssuerParameters *IssuerParameters `json:"issuer,omitempty"` - // Csr - The certificate signing request (CSR) that is being used in the certificate operation. - Csr *[]byte `json:"csr,omitempty"` - // CancellationRequested - Indicates if cancellation was requested on the certificate operation. - CancellationRequested *bool `json:"cancellation_requested,omitempty"` - // Status - Status of the certificate operation. - Status *string `json:"status,omitempty"` - // StatusDetails - The status details of the certificate operation. - StatusDetails *string `json:"status_details,omitempty"` - // Error - Error encountered, if any, during the certificate operation. - Error *Error `json:"error,omitempty"` - // Target - Location which contains the result of the certificate operation. - Target *string `json:"target,omitempty"` - // RequestID - Identifier for the certificate operation. - RequestID *string `json:"request_id,omitempty"` -} - -// MarshalJSON is the custom marshaler for CertificateOperation. -func (co CertificateOperation) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if co.IssuerParameters != nil { - objectMap["issuer"] = co.IssuerParameters - } - if co.Csr != nil { - objectMap["csr"] = co.Csr - } - if co.CancellationRequested != nil { - objectMap["cancellation_requested"] = co.CancellationRequested - } - if co.Status != nil { - objectMap["status"] = co.Status - } - if co.StatusDetails != nil { - objectMap["status_details"] = co.StatusDetails - } - if co.Error != nil { - objectMap["error"] = co.Error - } - if co.Target != nil { - objectMap["target"] = co.Target - } - if co.RequestID != nil { - objectMap["request_id"] = co.RequestID - } - return json.Marshal(objectMap) -} - -// CertificateOperationUpdateParameter the certificate operation update parameters. -type CertificateOperationUpdateParameter struct { - // CancellationRequested - Indicates if cancellation was requested on the certificate operation. - CancellationRequested *bool `json:"cancellation_requested,omitempty"` -} - -// CertificatePolicy management policy for a certificate. -type CertificatePolicy struct { - autorest.Response `json:"-"` - // ID - READ-ONLY; The certificate id. - ID *string `json:"id,omitempty"` - // KeyProperties - Properties of the key backing a certificate. - KeyProperties *KeyProperties `json:"key_props,omitempty"` - // SecretProperties - Properties of the secret backing a certificate. - SecretProperties *SecretProperties `json:"secret_props,omitempty"` - // X509CertificateProperties - Properties of the X509 component of a certificate. - X509CertificateProperties *X509CertificateProperties `json:"x509_props,omitempty"` - // LifetimeActions - Actions that will be performed by Key Vault over the lifetime of a certificate. 
- LifetimeActions *[]LifetimeAction `json:"lifetime_actions,omitempty"` - // IssuerParameters - Parameters for the issuer of the X509 component of a certificate. - IssuerParameters *IssuerParameters `json:"issuer,omitempty"` - // Attributes - The certificate attributes. - Attributes *CertificateAttributes `json:"attributes,omitempty"` -} - -// MarshalJSON is the custom marshaler for CertificatePolicy. -func (cp CertificatePolicy) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if cp.KeyProperties != nil { - objectMap["key_props"] = cp.KeyProperties - } - if cp.SecretProperties != nil { - objectMap["secret_props"] = cp.SecretProperties - } - if cp.X509CertificateProperties != nil { - objectMap["x509_props"] = cp.X509CertificateProperties - } - if cp.LifetimeActions != nil { - objectMap["lifetime_actions"] = cp.LifetimeActions - } - if cp.IssuerParameters != nil { - objectMap["issuer"] = cp.IssuerParameters - } - if cp.Attributes != nil { - objectMap["attributes"] = cp.Attributes - } - return json.Marshal(objectMap) -} - -// CertificateRestoreParameters the certificate restore parameters. -type CertificateRestoreParameters struct { - // CertificateBundleBackup - The backup blob associated with a certificate bundle. (a URL-encoded base64 string) - CertificateBundleBackup *string `json:"value,omitempty"` -} - -// CertificateUpdateParameters the certificate update parameters. -type CertificateUpdateParameters struct { - // CertificatePolicy - The management policy for the certificate. - CertificatePolicy *CertificatePolicy `json:"policy,omitempty"` - // CertificateAttributes - The attributes of the certificate (optional). - CertificateAttributes *CertificateAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for CertificateUpdateParameters. -func (cup CertificateUpdateParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if cup.CertificatePolicy != nil { - objectMap["policy"] = cup.CertificatePolicy - } - if cup.CertificateAttributes != nil { - objectMap["attributes"] = cup.CertificateAttributes - } - if cup.Tags != nil { - objectMap["tags"] = cup.Tags - } - return json.Marshal(objectMap) -} - -// Contact the contact information for the vault certificates. -type Contact struct { - // EmailAddress - Email address. - EmailAddress *string `json:"email,omitempty"` - // Name - Name. - Name *string `json:"name,omitempty"` - // Phone - Phone number. - Phone *string `json:"phone,omitempty"` -} - -// Contacts the contacts for the vault certificates. -type Contacts struct { - autorest.Response `json:"-"` - // ID - READ-ONLY; Identifier for the contacts collection. - ID *string `json:"id,omitempty"` - // ContactList - The contact list for the vault certificates. - ContactList *[]Contact `json:"contacts,omitempty"` -} - -// MarshalJSON is the custom marshaler for Contacts. -func (c Contacts) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if c.ContactList != nil { - objectMap["contacts"] = c.ContactList - } - return json.Marshal(objectMap) -} - -// DeletedCertificateBundle a Deleted Certificate consisting of its previous id, attributes and its tags, -// as well as information on when it will be purged. 
-type DeletedCertificateBundle struct { - autorest.Response `json:"-"` - // RecoveryID - The url of the recovery object, used to identify and recover the deleted certificate. - RecoveryID *string `json:"recoveryId,omitempty"` - // ScheduledPurgeDate - READ-ONLY; The time when the certificate is scheduled to be purged, in UTC - ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` - // DeletedDate - READ-ONLY; The time when the certificate was deleted, in UTC - DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` - // ID - READ-ONLY; The certificate id. - ID *string `json:"id,omitempty"` - // Kid - READ-ONLY; The key id. - Kid *string `json:"kid,omitempty"` - // Sid - READ-ONLY; The secret id. - Sid *string `json:"sid,omitempty"` - // X509Thumbprint - READ-ONLY; Thumbprint of the certificate. (a URL-encoded base64 string) - X509Thumbprint *string `json:"x5t,omitempty"` - // Policy - READ-ONLY; The management policy. - Policy *CertificatePolicy `json:"policy,omitempty"` - // Cer - CER contents of x509 certificate. - Cer *[]byte `json:"cer,omitempty"` - // ContentType - The content type of the secret. - ContentType *string `json:"contentType,omitempty"` - // Attributes - The certificate attributes. - Attributes *CertificateAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for DeletedCertificateBundle. -func (dcb DeletedCertificateBundle) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if dcb.RecoveryID != nil { - objectMap["recoveryId"] = dcb.RecoveryID - } - if dcb.Cer != nil { - objectMap["cer"] = dcb.Cer - } - if dcb.ContentType != nil { - objectMap["contentType"] = dcb.ContentType - } - if dcb.Attributes != nil { - objectMap["attributes"] = dcb.Attributes - } - if dcb.Tags != nil { - objectMap["tags"] = dcb.Tags - } - return json.Marshal(objectMap) -} - -// DeletedCertificateItem the deleted certificate item containing metadata about the deleted certificate. -type DeletedCertificateItem struct { - // RecoveryID - The url of the recovery object, used to identify and recover the deleted certificate. - RecoveryID *string `json:"recoveryId,omitempty"` - // ScheduledPurgeDate - READ-ONLY; The time when the certificate is scheduled to be purged, in UTC - ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` - // DeletedDate - READ-ONLY; The time when the certificate was deleted, in UTC - DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` - // ID - Certificate identifier. - ID *string `json:"id,omitempty"` - // Attributes - The certificate management attributes. - Attributes *CertificateAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // X509Thumbprint - Thumbprint of the certificate. (a URL-encoded base64 string) - X509Thumbprint *string `json:"x5t,omitempty"` -} - -// MarshalJSON is the custom marshaler for DeletedCertificateItem. 
-func (dci DeletedCertificateItem) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if dci.RecoveryID != nil { - objectMap["recoveryId"] = dci.RecoveryID - } - if dci.ID != nil { - objectMap["id"] = dci.ID - } - if dci.Attributes != nil { - objectMap["attributes"] = dci.Attributes - } - if dci.Tags != nil { - objectMap["tags"] = dci.Tags - } - if dci.X509Thumbprint != nil { - objectMap["x5t"] = dci.X509Thumbprint - } - return json.Marshal(objectMap) -} - -// DeletedCertificateListResult a list of certificates that have been deleted in this vault. -type DeletedCertificateListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of deleted certificates in the vault along with a link to the next page of deleted certificates - Value *[]DeletedCertificateItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of deleted certificates. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for DeletedCertificateListResult. -func (dclr DeletedCertificateListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// DeletedCertificateListResultIterator provides access to a complete listing of DeletedCertificateItem -// values. -type DeletedCertificateListResultIterator struct { - i int - page DeletedCertificateListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *DeletedCertificateListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedCertificateListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *DeletedCertificateListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter DeletedCertificateListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter DeletedCertificateListResultIterator) Response() DeletedCertificateListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter DeletedCertificateListResultIterator) Value() DeletedCertificateItem { - if !iter.page.NotDone() { - return DeletedCertificateItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the DeletedCertificateListResultIterator type. 
-func NewDeletedCertificateListResultIterator(page DeletedCertificateListResultPage) DeletedCertificateListResultIterator { - return DeletedCertificateListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (dclr DeletedCertificateListResult) IsEmpty() bool { - return dclr.Value == nil || len(*dclr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (dclr DeletedCertificateListResult) hasNextLink() bool { - return dclr.NextLink != nil && len(*dclr.NextLink) != 0 -} - -// deletedCertificateListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (dclr DeletedCertificateListResult) deletedCertificateListResultPreparer(ctx context.Context) (*http.Request, error) { - if !dclr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(dclr.NextLink))) -} - -// DeletedCertificateListResultPage contains a page of DeletedCertificateItem values. -type DeletedCertificateListResultPage struct { - fn func(context.Context, DeletedCertificateListResult) (DeletedCertificateListResult, error) - dclr DeletedCertificateListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *DeletedCertificateListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedCertificateListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.dclr) - if err != nil { - return err - } - page.dclr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *DeletedCertificateListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page DeletedCertificateListResultPage) NotDone() bool { - return !page.dclr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page DeletedCertificateListResultPage) Response() DeletedCertificateListResult { - return page.dclr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page DeletedCertificateListResultPage) Values() []DeletedCertificateItem { - if page.dclr.IsEmpty() { - return nil - } - return *page.dclr.Value -} - -// Creates a new instance of the DeletedCertificateListResultPage type. 
-func NewDeletedCertificateListResultPage(cur DeletedCertificateListResult, getNextPage func(context.Context, DeletedCertificateListResult) (DeletedCertificateListResult, error)) DeletedCertificateListResultPage { - return DeletedCertificateListResultPage{ - fn: getNextPage, - dclr: cur, - } -} - -// DeletedKeyBundle a DeletedKeyBundle consisting of a WebKey plus its Attributes and deletion info -type DeletedKeyBundle struct { - autorest.Response `json:"-"` - // RecoveryID - The url of the recovery object, used to identify and recover the deleted key. - RecoveryID *string `json:"recoveryId,omitempty"` - // ScheduledPurgeDate - READ-ONLY; The time when the key is scheduled to be purged, in UTC - ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` - // DeletedDate - READ-ONLY; The time when the key was deleted, in UTC - DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` - // Key - The Json web key. - Key *JSONWebKey `json:"key,omitempty"` - // Attributes - The key management attributes. - Attributes *KeyAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // Managed - READ-ONLY; True if the key's lifetime is managed by key vault. If this is a key backing a certificate, then managed will be true. - Managed *bool `json:"managed,omitempty"` -} - -// MarshalJSON is the custom marshaler for DeletedKeyBundle. -func (dkb DeletedKeyBundle) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if dkb.RecoveryID != nil { - objectMap["recoveryId"] = dkb.RecoveryID - } - if dkb.Key != nil { - objectMap["key"] = dkb.Key - } - if dkb.Attributes != nil { - objectMap["attributes"] = dkb.Attributes - } - if dkb.Tags != nil { - objectMap["tags"] = dkb.Tags - } - return json.Marshal(objectMap) -} - -// DeletedKeyItem the deleted key item containing the deleted key metadata and information about deletion. -type DeletedKeyItem struct { - // RecoveryID - The url of the recovery object, used to identify and recover the deleted key. - RecoveryID *string `json:"recoveryId,omitempty"` - // ScheduledPurgeDate - READ-ONLY; The time when the key is scheduled to be purged, in UTC - ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` - // DeletedDate - READ-ONLY; The time when the key was deleted, in UTC - DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` - // Kid - Key identifier. - Kid *string `json:"kid,omitempty"` - // Attributes - The key management attributes. - Attributes *KeyAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // Managed - READ-ONLY; True if the key's lifetime is managed by key vault. If this is a key backing a certificate, then managed will be true. - Managed *bool `json:"managed,omitempty"` -} - -// MarshalJSON is the custom marshaler for DeletedKeyItem. -func (dki DeletedKeyItem) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if dki.RecoveryID != nil { - objectMap["recoveryId"] = dki.RecoveryID - } - if dki.Kid != nil { - objectMap["kid"] = dki.Kid - } - if dki.Attributes != nil { - objectMap["attributes"] = dki.Attributes - } - if dki.Tags != nil { - objectMap["tags"] = dki.Tags - } - return json.Marshal(objectMap) -} - -// DeletedKeyListResult a list of keys that have been deleted in this vault. 
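// A minimal usage sketch of the page/iterator pair generated above, assuming
// the page is seeded with an already-fetched DeletedCertificateListResult;
// in practice the page and its getNextPage func come from the keyvault
// client's list operation. The identifier below is purely illustrative.
func exampleIterateDeletedCertificates(ctx context.Context) error {
	id := "https://myvault.vault.azure.net/deletedcertificates/cert1" // hypothetical id
	cur := DeletedCertificateListResult{Value: &[]DeletedCertificateItem{{ID: &id}}}
	page := NewDeletedCertificateListResultPage(cur, func(context.Context, DeletedCertificateListResult) (DeletedCertificateListResult, error) {
		// Returning an empty result (no NextLink, no values) ends the enumeration.
		return DeletedCertificateListResult{}, nil
	})
	for iter := NewDeletedCertificateListResultIterator(page); iter.NotDone(); {
		item := iter.Value() // DeletedCertificateItem: ID, DeletedDate, ScheduledPurgeDate, ...
		_ = item
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}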
-type DeletedKeyListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of deleted keys in the vault along with a link to the next page of deleted keys - Value *[]DeletedKeyItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of deleted keys. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for DeletedKeyListResult. -func (dklr DeletedKeyListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// DeletedKeyListResultIterator provides access to a complete listing of DeletedKeyItem values. -type DeletedKeyListResultIterator struct { - i int - page DeletedKeyListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *DeletedKeyListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedKeyListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *DeletedKeyListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter DeletedKeyListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter DeletedKeyListResultIterator) Response() DeletedKeyListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter DeletedKeyListResultIterator) Value() DeletedKeyItem { - if !iter.page.NotDone() { - return DeletedKeyItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the DeletedKeyListResultIterator type. -func NewDeletedKeyListResultIterator(page DeletedKeyListResultPage) DeletedKeyListResultIterator { - return DeletedKeyListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (dklr DeletedKeyListResult) IsEmpty() bool { - return dklr.Value == nil || len(*dklr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (dklr DeletedKeyListResult) hasNextLink() bool { - return dklr.NextLink != nil && len(*dklr.NextLink) != 0 -} - -// deletedKeyListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. 
-func (dklr DeletedKeyListResult) deletedKeyListResultPreparer(ctx context.Context) (*http.Request, error) { - if !dklr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(dklr.NextLink))) -} - -// DeletedKeyListResultPage contains a page of DeletedKeyItem values. -type DeletedKeyListResultPage struct { - fn func(context.Context, DeletedKeyListResult) (DeletedKeyListResult, error) - dklr DeletedKeyListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *DeletedKeyListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedKeyListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.dklr) - if err != nil { - return err - } - page.dklr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *DeletedKeyListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page DeletedKeyListResultPage) NotDone() bool { - return !page.dklr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page DeletedKeyListResultPage) Response() DeletedKeyListResult { - return page.dklr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page DeletedKeyListResultPage) Values() []DeletedKeyItem { - if page.dklr.IsEmpty() { - return nil - } - return *page.dklr.Value -} - -// Creates a new instance of the DeletedKeyListResultPage type. -func NewDeletedKeyListResultPage(cur DeletedKeyListResult, getNextPage func(context.Context, DeletedKeyListResult) (DeletedKeyListResult, error)) DeletedKeyListResultPage { - return DeletedKeyListResultPage{ - fn: getNextPage, - dklr: cur, - } -} - -// DeletedSasDefinitionBundle a deleted SAS definition bundle consisting of its previous id, attributes and -// its tags, as well as information on when it will be purged. -type DeletedSasDefinitionBundle struct { - autorest.Response `json:"-"` - // RecoveryID - The url of the recovery object, used to identify and recover the deleted SAS definition. - RecoveryID *string `json:"recoveryId,omitempty"` - // ScheduledPurgeDate - READ-ONLY; The time when the SAS definition is scheduled to be purged, in UTC - ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` - // DeletedDate - READ-ONLY; The time when the SAS definition was deleted, in UTC - DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` - // ID - READ-ONLY; The SAS definition id. - ID *string `json:"id,omitempty"` - // SecretID - READ-ONLY; Storage account SAS definition secret id. - SecretID *string `json:"sid,omitempty"` - // TemplateURI - READ-ONLY; The SAS definition token template signed with an arbitrary key. Tokens created according to the SAS definition will have the same properties as the template. 
- TemplateURI *string `json:"templateUri,omitempty"` - // SasType - READ-ONLY; The type of SAS token the SAS definition will create. Possible values include: 'Account', 'Service' - SasType SasTokenType `json:"sasType,omitempty"` - // ValidityPeriod - READ-ONLY; The validity period of SAS tokens created according to the SAS definition. - ValidityPeriod *string `json:"validityPeriod,omitempty"` - // Attributes - READ-ONLY; The SAS definition attributes. - Attributes *SasDefinitionAttributes `json:"attributes,omitempty"` - // Tags - READ-ONLY; Application specific metadata in the form of key-value pairs - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for DeletedSasDefinitionBundle. -func (dsdb DeletedSasDefinitionBundle) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if dsdb.RecoveryID != nil { - objectMap["recoveryId"] = dsdb.RecoveryID - } - return json.Marshal(objectMap) -} - -// DeletedSasDefinitionItem the deleted SAS definition item containing metadata about the deleted SAS -// definition. -type DeletedSasDefinitionItem struct { - // RecoveryID - The url of the recovery object, used to identify and recover the deleted SAS definition. - RecoveryID *string `json:"recoveryId,omitempty"` - // ScheduledPurgeDate - READ-ONLY; The time when the SAS definition is scheduled to be purged, in UTC - ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` - // DeletedDate - READ-ONLY; The time when the SAS definition was deleted, in UTC - DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` - // ID - READ-ONLY; The storage SAS identifier. - ID *string `json:"id,omitempty"` - // SecretID - READ-ONLY; The storage account SAS definition secret id. - SecretID *string `json:"sid,omitempty"` - // Attributes - READ-ONLY; The SAS definition management attributes. - Attributes *SasDefinitionAttributes `json:"attributes,omitempty"` - // Tags - READ-ONLY; Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for DeletedSasDefinitionItem. -func (dsdi DeletedSasDefinitionItem) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if dsdi.RecoveryID != nil { - objectMap["recoveryId"] = dsdi.RecoveryID - } - return json.Marshal(objectMap) -} - -// DeletedSasDefinitionListResult the deleted SAS definition list result -type DeletedSasDefinitionListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of the deleted SAS definitions in the vault along with a link to the next page of deleted sas definitions - Value *[]DeletedSasDefinitionItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of deleted SAS definitions. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for DeletedSasDefinitionListResult. -func (dsdlr DeletedSasDefinitionListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// DeletedSasDefinitionListResultIterator provides access to a complete listing of DeletedSasDefinitionItem -// values. -type DeletedSasDefinitionListResultIterator struct { - i int - page DeletedSasDefinitionListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. 
-func (iter *DeletedSasDefinitionListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedSasDefinitionListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *DeletedSasDefinitionListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter DeletedSasDefinitionListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter DeletedSasDefinitionListResultIterator) Response() DeletedSasDefinitionListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter DeletedSasDefinitionListResultIterator) Value() DeletedSasDefinitionItem { - if !iter.page.NotDone() { - return DeletedSasDefinitionItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the DeletedSasDefinitionListResultIterator type. -func NewDeletedSasDefinitionListResultIterator(page DeletedSasDefinitionListResultPage) DeletedSasDefinitionListResultIterator { - return DeletedSasDefinitionListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (dsdlr DeletedSasDefinitionListResult) IsEmpty() bool { - return dsdlr.Value == nil || len(*dsdlr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (dsdlr DeletedSasDefinitionListResult) hasNextLink() bool { - return dsdlr.NextLink != nil && len(*dsdlr.NextLink) != 0 -} - -// deletedSasDefinitionListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (dsdlr DeletedSasDefinitionListResult) deletedSasDefinitionListResultPreparer(ctx context.Context) (*http.Request, error) { - if !dsdlr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(dsdlr.NextLink))) -} - -// DeletedSasDefinitionListResultPage contains a page of DeletedSasDefinitionItem values. -type DeletedSasDefinitionListResultPage struct { - fn func(context.Context, DeletedSasDefinitionListResult) (DeletedSasDefinitionListResult, error) - dsdlr DeletedSasDefinitionListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. 
-func (page *DeletedSasDefinitionListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedSasDefinitionListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.dsdlr) - if err != nil { - return err - } - page.dsdlr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *DeletedSasDefinitionListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page DeletedSasDefinitionListResultPage) NotDone() bool { - return !page.dsdlr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page DeletedSasDefinitionListResultPage) Response() DeletedSasDefinitionListResult { - return page.dsdlr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page DeletedSasDefinitionListResultPage) Values() []DeletedSasDefinitionItem { - if page.dsdlr.IsEmpty() { - return nil - } - return *page.dsdlr.Value -} - -// Creates a new instance of the DeletedSasDefinitionListResultPage type. -func NewDeletedSasDefinitionListResultPage(cur DeletedSasDefinitionListResult, getNextPage func(context.Context, DeletedSasDefinitionListResult) (DeletedSasDefinitionListResult, error)) DeletedSasDefinitionListResultPage { - return DeletedSasDefinitionListResultPage{ - fn: getNextPage, - dsdlr: cur, - } -} - -// DeletedSecretBundle a Deleted Secret consisting of its previous id, attributes and its tags, as well as -// information on when it will be purged. -type DeletedSecretBundle struct { - autorest.Response `json:"-"` - // RecoveryID - The url of the recovery object, used to identify and recover the deleted secret. - RecoveryID *string `json:"recoveryId,omitempty"` - // ScheduledPurgeDate - READ-ONLY; The time when the secret is scheduled to be purged, in UTC - ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` - // DeletedDate - READ-ONLY; The time when the secret was deleted, in UTC - DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` - // Value - The secret value. - Value *string `json:"value,omitempty"` - // ID - The secret id. - ID *string `json:"id,omitempty"` - // ContentType - The content type of the secret. - ContentType *string `json:"contentType,omitempty"` - // Attributes - The secret management attributes. - Attributes *SecretAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // Kid - READ-ONLY; If this is a secret backing a KV certificate, then this field specifies the corresponding key backing the KV certificate. - Kid *string `json:"kid,omitempty"` - // Managed - READ-ONLY; True if the secret's lifetime is managed by key vault. If this is a secret backing a certificate, then managed will be true. - Managed *bool `json:"managed,omitempty"` -} - -// MarshalJSON is the custom marshaler for DeletedSecretBundle. 
-func (dsb DeletedSecretBundle) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if dsb.RecoveryID != nil { - objectMap["recoveryId"] = dsb.RecoveryID - } - if dsb.Value != nil { - objectMap["value"] = dsb.Value - } - if dsb.ID != nil { - objectMap["id"] = dsb.ID - } - if dsb.ContentType != nil { - objectMap["contentType"] = dsb.ContentType - } - if dsb.Attributes != nil { - objectMap["attributes"] = dsb.Attributes - } - if dsb.Tags != nil { - objectMap["tags"] = dsb.Tags - } - return json.Marshal(objectMap) -} - -// DeletedSecretItem the deleted secret item containing metadata about the deleted secret. -type DeletedSecretItem struct { - // RecoveryID - The url of the recovery object, used to identify and recover the deleted secret. - RecoveryID *string `json:"recoveryId,omitempty"` - // ScheduledPurgeDate - READ-ONLY; The time when the secret is scheduled to be purged, in UTC - ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` - // DeletedDate - READ-ONLY; The time when the secret was deleted, in UTC - DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` - // ID - Secret identifier. - ID *string `json:"id,omitempty"` - // Attributes - The secret management attributes. - Attributes *SecretAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // ContentType - Type of the secret value such as a password. - ContentType *string `json:"contentType,omitempty"` - // Managed - READ-ONLY; True if the secret's lifetime is managed by key vault. If this is a key backing a certificate, then managed will be true. - Managed *bool `json:"managed,omitempty"` -} - -// MarshalJSON is the custom marshaler for DeletedSecretItem. -func (dsi DeletedSecretItem) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if dsi.RecoveryID != nil { - objectMap["recoveryId"] = dsi.RecoveryID - } - if dsi.ID != nil { - objectMap["id"] = dsi.ID - } - if dsi.Attributes != nil { - objectMap["attributes"] = dsi.Attributes - } - if dsi.Tags != nil { - objectMap["tags"] = dsi.Tags - } - if dsi.ContentType != nil { - objectMap["contentType"] = dsi.ContentType - } - return json.Marshal(objectMap) -} - -// DeletedSecretListResult the deleted secret list result -type DeletedSecretListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of the deleted secrets in the vault along with a link to the next page of deleted secrets - Value *[]DeletedSecretItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of deleted secrets. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for DeletedSecretListResult. -func (dslr DeletedSecretListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// DeletedSecretListResultIterator provides access to a complete listing of DeletedSecretItem values. -type DeletedSecretListResultIterator struct { - i int - page DeletedSecretListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. 
-func (iter *DeletedSecretListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedSecretListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *DeletedSecretListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter DeletedSecretListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter DeletedSecretListResultIterator) Response() DeletedSecretListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter DeletedSecretListResultIterator) Value() DeletedSecretItem { - if !iter.page.NotDone() { - return DeletedSecretItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the DeletedSecretListResultIterator type. -func NewDeletedSecretListResultIterator(page DeletedSecretListResultPage) DeletedSecretListResultIterator { - return DeletedSecretListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (dslr DeletedSecretListResult) IsEmpty() bool { - return dslr.Value == nil || len(*dslr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (dslr DeletedSecretListResult) hasNextLink() bool { - return dslr.NextLink != nil && len(*dslr.NextLink) != 0 -} - -// deletedSecretListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (dslr DeletedSecretListResult) deletedSecretListResultPreparer(ctx context.Context) (*http.Request, error) { - if !dslr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(dslr.NextLink))) -} - -// DeletedSecretListResultPage contains a page of DeletedSecretItem values. -type DeletedSecretListResultPage struct { - fn func(context.Context, DeletedSecretListResult) (DeletedSecretListResult, error) - dslr DeletedSecretListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. 
-func (page *DeletedSecretListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedSecretListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.dslr) - if err != nil { - return err - } - page.dslr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *DeletedSecretListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page DeletedSecretListResultPage) NotDone() bool { - return !page.dslr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page DeletedSecretListResultPage) Response() DeletedSecretListResult { - return page.dslr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page DeletedSecretListResultPage) Values() []DeletedSecretItem { - if page.dslr.IsEmpty() { - return nil - } - return *page.dslr.Value -} - -// Creates a new instance of the DeletedSecretListResultPage type. -func NewDeletedSecretListResultPage(cur DeletedSecretListResult, getNextPage func(context.Context, DeletedSecretListResult) (DeletedSecretListResult, error)) DeletedSecretListResultPage { - return DeletedSecretListResultPage{ - fn: getNextPage, - dslr: cur, - } -} - -// DeletedStorageAccountItem the deleted storage account item containing metadata about the deleted storage -// account. -type DeletedStorageAccountItem struct { - // RecoveryID - The url of the recovery object, used to identify and recover the deleted storage account. - RecoveryID *string `json:"recoveryId,omitempty"` - // ScheduledPurgeDate - READ-ONLY; The time when the storage account is scheduled to be purged, in UTC - ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` - // DeletedDate - READ-ONLY; The time when the storage account was deleted, in UTC - DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` - // ID - READ-ONLY; Storage identifier. - ID *string `json:"id,omitempty"` - // ResourceID - READ-ONLY; Storage account resource Id. - ResourceID *string `json:"resourceId,omitempty"` - // Attributes - READ-ONLY; The storage account management attributes. - Attributes *StorageAccountAttributes `json:"attributes,omitempty"` - // Tags - READ-ONLY; Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for DeletedStorageAccountItem. -func (dsai DeletedStorageAccountItem) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if dsai.RecoveryID != nil { - objectMap["recoveryId"] = dsai.RecoveryID - } - return json.Marshal(objectMap) -} - -// DeletedStorageBundle a deleted storage account bundle consisting of its previous id, attributes and its -// tags, as well as information on when it will be purged. -type DeletedStorageBundle struct { - autorest.Response `json:"-"` - // RecoveryID - The url of the recovery object, used to identify and recover the deleted storage account. 
- RecoveryID *string `json:"recoveryId,omitempty"` - // ScheduledPurgeDate - READ-ONLY; The time when the storage account is scheduled to be purged, in UTC - ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` - // DeletedDate - READ-ONLY; The time when the storage account was deleted, in UTC - DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` - // ID - READ-ONLY; The storage account id. - ID *string `json:"id,omitempty"` - // ResourceID - READ-ONLY; The storage account resource id. - ResourceID *string `json:"resourceId,omitempty"` - // ActiveKeyName - READ-ONLY; The current active storage account key name. - ActiveKeyName *string `json:"activeKeyName,omitempty"` - // AutoRegenerateKey - READ-ONLY; whether keyvault should manage the storage account for the user. - AutoRegenerateKey *bool `json:"autoRegenerateKey,omitempty"` - // RegenerationPeriod - READ-ONLY; The key regeneration time duration specified in ISO-8601 format. - RegenerationPeriod *string `json:"regenerationPeriod,omitempty"` - // Attributes - READ-ONLY; The storage account attributes. - Attributes *StorageAccountAttributes `json:"attributes,omitempty"` - // Tags - READ-ONLY; Application specific metadata in the form of key-value pairs - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for DeletedStorageBundle. -func (dsb DeletedStorageBundle) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if dsb.RecoveryID != nil { - objectMap["recoveryId"] = dsb.RecoveryID - } - return json.Marshal(objectMap) -} - -// DeletedStorageListResult the deleted storage account list result -type DeletedStorageListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of the deleted storage accounts in the vault along with a link to the next page of deleted storage accounts - Value *[]DeletedStorageAccountItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of deleted storage accounts. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for DeletedStorageListResult. -func (dslr DeletedStorageListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// DeletedStorageListResultIterator provides access to a complete listing of DeletedStorageAccountItem -// values. -type DeletedStorageListResultIterator struct { - i int - page DeletedStorageListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *DeletedStorageListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedStorageListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. 
-func (iter *DeletedStorageListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter DeletedStorageListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter DeletedStorageListResultIterator) Response() DeletedStorageListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter DeletedStorageListResultIterator) Value() DeletedStorageAccountItem { - if !iter.page.NotDone() { - return DeletedStorageAccountItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the DeletedStorageListResultIterator type. -func NewDeletedStorageListResultIterator(page DeletedStorageListResultPage) DeletedStorageListResultIterator { - return DeletedStorageListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (dslr DeletedStorageListResult) IsEmpty() bool { - return dslr.Value == nil || len(*dslr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (dslr DeletedStorageListResult) hasNextLink() bool { - return dslr.NextLink != nil && len(*dslr.NextLink) != 0 -} - -// deletedStorageListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (dslr DeletedStorageListResult) deletedStorageListResultPreparer(ctx context.Context) (*http.Request, error) { - if !dslr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(dslr.NextLink))) -} - -// DeletedStorageListResultPage contains a page of DeletedStorageAccountItem values. -type DeletedStorageListResultPage struct { - fn func(context.Context, DeletedStorageListResult) (DeletedStorageListResult, error) - dslr DeletedStorageListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *DeletedStorageListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedStorageListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.dslr) - if err != nil { - return err - } - page.dslr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *DeletedStorageListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page DeletedStorageListResultPage) NotDone() bool { - return !page.dslr.IsEmpty() -} - -// Response returns the raw server response from the last page request. 
-func (page DeletedStorageListResultPage) Response() DeletedStorageListResult { - return page.dslr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page DeletedStorageListResultPage) Values() []DeletedStorageAccountItem { - if page.dslr.IsEmpty() { - return nil - } - return *page.dslr.Value -} - -// Creates a new instance of the DeletedStorageListResultPage type. -func NewDeletedStorageListResultPage(cur DeletedStorageListResult, getNextPage func(context.Context, DeletedStorageListResult) (DeletedStorageListResult, error)) DeletedStorageListResultPage { - return DeletedStorageListResultPage{ - fn: getNextPage, - dslr: cur, - } -} - -// Error the key vault server error. -type Error struct { - // Code - READ-ONLY; The error code. - Code *string `json:"code,omitempty"` - // Message - READ-ONLY; The error message. - Message *string `json:"message,omitempty"` - // InnerError - READ-ONLY - InnerError *Error `json:"innererror,omitempty"` -} - -// MarshalJSON is the custom marshaler for Error. -func (e Error) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// ErrorType the key vault error exception. -type ErrorType struct { - // Error - READ-ONLY - Error *Error `json:"error,omitempty"` -} - -// MarshalJSON is the custom marshaler for ErrorType. -func (et ErrorType) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// IssuerAttributes the attributes of an issuer managed by the Key Vault service. -type IssuerAttributes struct { - // Enabled - Determines whether the issuer is enabled. - Enabled *bool `json:"enabled,omitempty"` - // Created - READ-ONLY; Creation time in UTC. - Created *date.UnixTime `json:"created,omitempty"` - // Updated - READ-ONLY; Last updated time in UTC. - Updated *date.UnixTime `json:"updated,omitempty"` -} - -// MarshalJSON is the custom marshaler for IssuerAttributes. -func (ia IssuerAttributes) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ia.Enabled != nil { - objectMap["enabled"] = ia.Enabled - } - return json.Marshal(objectMap) -} - -// IssuerBundle the issuer for Key Vault certificate. -type IssuerBundle struct { - autorest.Response `json:"-"` - // ID - READ-ONLY; Identifier for the issuer object. - ID *string `json:"id,omitempty"` - // Provider - The issuer provider. - Provider *string `json:"provider,omitempty"` - // Credentials - The credentials to be used for the issuer. - Credentials *IssuerCredentials `json:"credentials,omitempty"` - // OrganizationDetails - Details of the organization as provided to the issuer. - OrganizationDetails *OrganizationDetails `json:"org_details,omitempty"` - // Attributes - Attributes of the issuer object. - Attributes *IssuerAttributes `json:"attributes,omitempty"` -} - -// MarshalJSON is the custom marshaler for IssuerBundle. -func (ib IssuerBundle) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ib.Provider != nil { - objectMap["provider"] = ib.Provider - } - if ib.Credentials != nil { - objectMap["credentials"] = ib.Credentials - } - if ib.OrganizationDetails != nil { - objectMap["org_details"] = ib.OrganizationDetails - } - if ib.Attributes != nil { - objectMap["attributes"] = ib.Attributes - } - return json.Marshal(objectMap) -} - -// IssuerCredentials the credentials to be used for the certificate issuer. 
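// A minimal sketch of how the custom marshalers above handle READ-ONLY
// metadata: only writable fields are emitted, so Created/Updated never
// appear in a request payload built from IssuerAttributes.
func exampleIssuerAttributesJSON() ([]byte, error) {
	enabled := true
	ia := IssuerAttributes{Enabled: &enabled}
	// Produces {"enabled":true}; the READ-ONLY Created/Updated fields are
	// dropped by IssuerAttributes.MarshalJSON even when set server-side.
	return json.Marshal(ia)
}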
-type IssuerCredentials struct { - // AccountID - The user name/account name/account id. - AccountID *string `json:"account_id,omitempty"` - // Password - The password/secret/account key. - Password *string `json:"pwd,omitempty"` -} - -// IssuerParameters parameters for the issuer of the X509 component of a certificate. -type IssuerParameters struct { - // Name - Name of the referenced issuer object or reserved names; for example, 'Self' or 'Unknown'. - Name *string `json:"name,omitempty"` - // CertificateType - Certificate type as supported by the provider (optional); for example 'OV-SSL', 'EV-SSL' - CertificateType *string `json:"cty,omitempty"` - // CertificateTransparency - Indicates if the certificates generated under this policy should be published to certificate transparency logs. - CertificateTransparency *bool `json:"cert_transparency,omitempty"` -} - -// JSONWebKey as of http://tools.ietf.org/html/draft-ietf-jose-json-web-key-18 -type JSONWebKey struct { - // Kid - Key identifier. - Kid *string `json:"kid,omitempty"` - // Kty - JsonWebKey Key Type (kty), as defined in https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40. Possible values include: 'EC', 'ECHSM', 'RSA', 'RSAHSM', 'Oct' - Kty JSONWebKeyType `json:"kty,omitempty"` - KeyOps *[]string `json:"key_ops,omitempty"` - // N - RSA modulus. (a URL-encoded base64 string) - N *string `json:"n,omitempty"` - // E - RSA public exponent. (a URL-encoded base64 string) - E *string `json:"e,omitempty"` - // D - RSA private exponent, or the D component of an EC private key. (a URL-encoded base64 string) - D *string `json:"d,omitempty"` - // DP - RSA private key parameter. (a URL-encoded base64 string) - DP *string `json:"dp,omitempty"` - // DQ - RSA private key parameter. (a URL-encoded base64 string) - DQ *string `json:"dq,omitempty"` - // QI - RSA private key parameter. (a URL-encoded base64 string) - QI *string `json:"qi,omitempty"` - // P - RSA secret prime. (a URL-encoded base64 string) - P *string `json:"p,omitempty"` - // Q - RSA secret prime, with p < q. (a URL-encoded base64 string) - Q *string `json:"q,omitempty"` - // K - Symmetric key. (a URL-encoded base64 string) - K *string `json:"k,omitempty"` - // T - HSM Token, used with 'Bring Your Own Key'. (a URL-encoded base64 string) - T *string `json:"key_hsm,omitempty"` - // Crv - Elliptic curve name. For valid values, see JsonWebKeyCurveName. Possible values include: 'P256', 'P384', 'P521', 'P256K' - Crv JSONWebKeyCurveName `json:"crv,omitempty"` - // X - X component of an EC public key. (a URL-encoded base64 string) - X *string `json:"x,omitempty"` - // Y - Y component of an EC public key. (a URL-encoded base64 string) - Y *string `json:"y,omitempty"` -} - -// KeyAttributes the attributes of a key managed by the key vault service. -type KeyAttributes struct { - // RecoverableDays - READ-ONLY; softDelete data retention days. Value should be >=7 and <=90 when softDelete enabled, otherwise 0. - RecoverableDays *int32 `json:"recoverableDays,omitempty"` - // RecoveryLevel - READ-ONLY; Reflects the deletion recovery level currently in effect for keys in the current vault. If it contains 'Purgeable' the key can be permanently deleted by a privileged user; otherwise, only the system can purge the key, at the end of the retention interval. 
Possible values include: 'Purgeable', 'RecoverablePurgeable', 'Recoverable', 'RecoverableProtectedSubscription', 'CustomizedRecoverablePurgeable', 'CustomizedRecoverable', 'CustomizedRecoverableProtectedSubscription' - RecoveryLevel DeletionRecoveryLevel `json:"recoveryLevel,omitempty"` - // Enabled - Determines whether the object is enabled. - Enabled *bool `json:"enabled,omitempty"` - // NotBefore - Not before date in UTC. - NotBefore *date.UnixTime `json:"nbf,omitempty"` - // Expires - Expiry date in UTC. - Expires *date.UnixTime `json:"exp,omitempty"` - // Created - READ-ONLY; Creation time in UTC. - Created *date.UnixTime `json:"created,omitempty"` - // Updated - READ-ONLY; Last updated time in UTC. - Updated *date.UnixTime `json:"updated,omitempty"` -} - -// MarshalJSON is the custom marshaler for KeyAttributes. -func (ka KeyAttributes) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ka.Enabled != nil { - objectMap["enabled"] = ka.Enabled - } - if ka.NotBefore != nil { - objectMap["nbf"] = ka.NotBefore - } - if ka.Expires != nil { - objectMap["exp"] = ka.Expires - } - return json.Marshal(objectMap) -} - -// KeyBundle a KeyBundle consisting of a WebKey plus its attributes. -type KeyBundle struct { - autorest.Response `json:"-"` - // Key - The Json web key. - Key *JSONWebKey `json:"key,omitempty"` - // Attributes - The key management attributes. - Attributes *KeyAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // Managed - READ-ONLY; True if the key's lifetime is managed by key vault. If this is a key backing a certificate, then managed will be true. - Managed *bool `json:"managed,omitempty"` -} - -// MarshalJSON is the custom marshaler for KeyBundle. -func (kb KeyBundle) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if kb.Key != nil { - objectMap["key"] = kb.Key - } - if kb.Attributes != nil { - objectMap["attributes"] = kb.Attributes - } - if kb.Tags != nil { - objectMap["tags"] = kb.Tags - } - return json.Marshal(objectMap) -} - -// KeyCreateParameters the key create parameters. -type KeyCreateParameters struct { - // Kty - The type of key to create. For valid values, see JsonWebKeyType. Possible values include: 'EC', 'ECHSM', 'RSA', 'RSAHSM', 'Oct' - Kty JSONWebKeyType `json:"kty,omitempty"` - // KeySize - The key size in bits. For example: 2048, 3072, or 4096 for RSA. - KeySize *int32 `json:"key_size,omitempty"` - KeyOps *[]JSONWebKeyOperation `json:"key_ops,omitempty"` - KeyAttributes *KeyAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // Curve - Elliptic curve name. For valid values, see JsonWebKeyCurveName. Possible values include: 'P256', 'P384', 'P521', 'P256K' - Curve JSONWebKeyCurveName `json:"crv,omitempty"` -} - -// MarshalJSON is the custom marshaler for KeyCreateParameters. 
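// A minimal sketch of populating the KeyCreateParameters defined above,
// assuming an RSA software key; typed constants for Kty (for example 'RSA')
// exist in this package, but a string conversion keeps the sketch
// self-contained. The custom marshaler that follows emits only the fields
// that are set.
func exampleKeyCreateParameters() ([]byte, error) {
	size := int32(2048)
	enabled := true
	env := "dev"
	params := KeyCreateParameters{
		Kty:           JSONWebKeyType("RSA"),
		KeySize:       &size,
		KeyAttributes: &KeyAttributes{Enabled: &enabled},
		Tags:          map[string]*string{"env": &env},
	}
	return json.Marshal(params)
}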
-func (kcp KeyCreateParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if kcp.Kty != "" { - objectMap["kty"] = kcp.Kty - } - if kcp.KeySize != nil { - objectMap["key_size"] = kcp.KeySize - } - if kcp.KeyOps != nil { - objectMap["key_ops"] = kcp.KeyOps - } - if kcp.KeyAttributes != nil { - objectMap["attributes"] = kcp.KeyAttributes - } - if kcp.Tags != nil { - objectMap["tags"] = kcp.Tags - } - if kcp.Curve != "" { - objectMap["crv"] = kcp.Curve - } - return json.Marshal(objectMap) -} - -// KeyImportParameters the key import parameters. -type KeyImportParameters struct { - // Hsm - Whether to import as a hardware key (HSM) or software key. - Hsm *bool `json:"Hsm,omitempty"` - // Key - The Json web key - Key *JSONWebKey `json:"key,omitempty"` - // KeyAttributes - The key management attributes. - KeyAttributes *KeyAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for KeyImportParameters. -func (kip KeyImportParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if kip.Hsm != nil { - objectMap["Hsm"] = kip.Hsm - } - if kip.Key != nil { - objectMap["key"] = kip.Key - } - if kip.KeyAttributes != nil { - objectMap["attributes"] = kip.KeyAttributes - } - if kip.Tags != nil { - objectMap["tags"] = kip.Tags - } - return json.Marshal(objectMap) -} - -// KeyItem the key item containing key metadata. -type KeyItem struct { - // Kid - Key identifier. - Kid *string `json:"kid,omitempty"` - // Attributes - The key management attributes. - Attributes *KeyAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // Managed - READ-ONLY; True if the key's lifetime is managed by key vault. If this is a key backing a certificate, then managed will be true. - Managed *bool `json:"managed,omitempty"` -} - -// MarshalJSON is the custom marshaler for KeyItem. -func (ki KeyItem) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ki.Kid != nil { - objectMap["kid"] = ki.Kid - } - if ki.Attributes != nil { - objectMap["attributes"] = ki.Attributes - } - if ki.Tags != nil { - objectMap["tags"] = ki.Tags - } - return json.Marshal(objectMap) -} - -// KeyListResult the key list result. -type KeyListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of keys in the key vault along with a link to the next page of keys. - Value *[]KeyItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of keys. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for KeyListResult. -func (klr KeyListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// KeyListResultIterator provides access to a complete listing of KeyItem values. -type KeyListResultIterator struct { - i int - page KeyListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. 
-func (iter *KeyListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/KeyListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *KeyListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter KeyListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter KeyListResultIterator) Response() KeyListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter KeyListResultIterator) Value() KeyItem { - if !iter.page.NotDone() { - return KeyItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the KeyListResultIterator type. -func NewKeyListResultIterator(page KeyListResultPage) KeyListResultIterator { - return KeyListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (klr KeyListResult) IsEmpty() bool { - return klr.Value == nil || len(*klr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (klr KeyListResult) hasNextLink() bool { - return klr.NextLink != nil && len(*klr.NextLink) != 0 -} - -// keyListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (klr KeyListResult) keyListResultPreparer(ctx context.Context) (*http.Request, error) { - if !klr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(klr.NextLink))) -} - -// KeyListResultPage contains a page of KeyItem values. -type KeyListResultPage struct { - fn func(context.Context, KeyListResult) (KeyListResult, error) - klr KeyListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *KeyListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/KeyListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.klr) - if err != nil { - return err - } - page.klr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. 
-func (page *KeyListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page KeyListResultPage) NotDone() bool { - return !page.klr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page KeyListResultPage) Response() KeyListResult { - return page.klr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page KeyListResultPage) Values() []KeyItem { - if page.klr.IsEmpty() { - return nil - } - return *page.klr.Value -} - -// Creates a new instance of the KeyListResultPage type. -func NewKeyListResultPage(cur KeyListResult, getNextPage func(context.Context, KeyListResult) (KeyListResult, error)) KeyListResultPage { - return KeyListResultPage{ - fn: getNextPage, - klr: cur, - } -} - -// KeyOperationResult the key operation result. -type KeyOperationResult struct { - autorest.Response `json:"-"` - // Kid - READ-ONLY; Key identifier - Kid *string `json:"kid,omitempty"` - // Result - READ-ONLY; a URL-encoded base64 string - Result *string `json:"value,omitempty"` -} - -// MarshalJSON is the custom marshaler for KeyOperationResult. -func (kor KeyOperationResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// KeyOperationsParameters the key operations parameters. -type KeyOperationsParameters struct { - // Algorithm - algorithm identifier. Possible values include: 'RSAOAEP', 'RSAOAEP256', 'RSA15' - Algorithm JSONWebKeyEncryptionAlgorithm `json:"alg,omitempty"` - // Value - a URL-encoded base64 string - Value *string `json:"value,omitempty"` -} - -// KeyProperties properties of the key pair backing a certificate. -type KeyProperties struct { - // Exportable - Indicates if the private key can be exported. - Exportable *bool `json:"exportable,omitempty"` - // KeyType - The type of key pair to be used for the certificate. Possible values include: 'EC', 'ECHSM', 'RSA', 'RSAHSM', 'Oct' - KeyType JSONWebKeyType `json:"kty,omitempty"` - // KeySize - The key size in bits. For example: 2048, 3072, or 4096 for RSA. - KeySize *int32 `json:"key_size,omitempty"` - // ReuseKey - Indicates if the same key pair will be used on certificate renewal. - ReuseKey *bool `json:"reuse_key,omitempty"` - // Curve - Elliptic curve name. For valid values, see JsonWebKeyCurveName. Possible values include: 'P256', 'P384', 'P521', 'P256K' - Curve JSONWebKeyCurveName `json:"crv,omitempty"` -} - -// KeyRestoreParameters the key restore parameters. -type KeyRestoreParameters struct { - // KeyBundleBackup - The backup blob associated with a key bundle. (a URL-encoded base64 string) - KeyBundleBackup *string `json:"value,omitempty"` -} - -// KeySignParameters the key operations parameters. -type KeySignParameters struct { - // Algorithm - The signing/verification algorithm identifier. For more information on possible algorithm types, see JsonWebKeySignatureAlgorithm. Possible values include: 'PS256', 'PS384', 'PS512', 'RS256', 'RS384', 'RS512', 'RSNULL', 'ES256', 'ES384', 'ES512', 'ES256K' - Algorithm JSONWebKeySignatureAlgorithm `json:"alg,omitempty"` - // Value - a URL-encoded base64 string - Value *string `json:"value,omitempty"` -} - -// KeyUpdateParameters the key update parameters. -type KeyUpdateParameters struct { - // KeyOps - Json web key operations. For more information on possible key operations, see JsonWebKeyOperation. 
- KeyOps *[]JSONWebKeyOperation `json:"key_ops,omitempty"` - KeyAttributes *KeyAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for KeyUpdateParameters. -func (kup KeyUpdateParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if kup.KeyOps != nil { - objectMap["key_ops"] = kup.KeyOps - } - if kup.KeyAttributes != nil { - objectMap["attributes"] = kup.KeyAttributes - } - if kup.Tags != nil { - objectMap["tags"] = kup.Tags - } - return json.Marshal(objectMap) -} - -// KeyVerifyParameters the key verify parameters. -type KeyVerifyParameters struct { - // Algorithm - The signing/verification algorithm. For more information on possible algorithm types, see JsonWebKeySignatureAlgorithm. Possible values include: 'PS256', 'PS384', 'PS512', 'RS256', 'RS384', 'RS512', 'RSNULL', 'ES256', 'ES384', 'ES512', 'ES256K' - Algorithm JSONWebKeySignatureAlgorithm `json:"alg,omitempty"` - // Digest - The digest used for signing. (a URL-encoded base64 string) - Digest *string `json:"digest,omitempty"` - // Signature - The signature to be verified. (a URL-encoded base64 string) - Signature *string `json:"value,omitempty"` -} - -// KeyVerifyResult the key verify result. -type KeyVerifyResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; True if the signature is verified, otherwise false. - Value *bool `json:"value,omitempty"` -} - -// MarshalJSON is the custom marshaler for KeyVerifyResult. -func (kvr KeyVerifyResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// LifetimeAction action and its trigger that will be performed by Key Vault over the lifetime of a -// certificate. -type LifetimeAction struct { - // Trigger - The condition that will execute the action. - Trigger *Trigger `json:"trigger,omitempty"` - // Action - The action that will be executed. - Action *Action `json:"action,omitempty"` -} - -// OrganizationDetails details of the organization of the certificate issuer. -type OrganizationDetails struct { - // ID - Id of the organization. - ID *string `json:"id,omitempty"` - // AdminDetails - Details of the organization administrator. - AdminDetails *[]AdministratorDetails `json:"admin_details,omitempty"` -} - -// PendingCertificateSigningRequestResult the pending certificate signing request result. -type PendingCertificateSigningRequestResult struct { - // Value - READ-ONLY; The pending certificate signing request as Base64 encoded string. - Value *string `json:"value,omitempty"` -} - -// MarshalJSON is the custom marshaler for PendingCertificateSigningRequestResult. -func (pcsrr PendingCertificateSigningRequestResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// SasDefinitionAttributes the SAS definition management attributes. -type SasDefinitionAttributes struct { - // Enabled - the enabled state of the object. - Enabled *bool `json:"enabled,omitempty"` - // Created - READ-ONLY; Creation time in UTC. - Created *date.UnixTime `json:"created,omitempty"` - // Updated - READ-ONLY; Last updated time in UTC. - Updated *date.UnixTime `json:"updated,omitempty"` - // RecoverableDays - READ-ONLY; softDelete data retention days. Value should be >=7 and <=90 when softDelete enabled, otherwise 0. 
- RecoverableDays *int32 `json:"recoverableDays,omitempty"` - // RecoveryLevel - READ-ONLY; Reflects the deletion recovery level currently in effect for SAS definitions in the current vault. If it contains 'Purgeable' the SAS definition can be permanently deleted by a privileged user; otherwise, only the system can purge the SAS definition, at the end of the retention interval. Possible values include: 'Purgeable', 'RecoverablePurgeable', 'Recoverable', 'RecoverableProtectedSubscription', 'CustomizedRecoverablePurgeable', 'CustomizedRecoverable', 'CustomizedRecoverableProtectedSubscription' - RecoveryLevel DeletionRecoveryLevel `json:"recoveryLevel,omitempty"` -} - -// MarshalJSON is the custom marshaler for SasDefinitionAttributes. -func (sda SasDefinitionAttributes) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if sda.Enabled != nil { - objectMap["enabled"] = sda.Enabled - } - return json.Marshal(objectMap) -} - -// SasDefinitionBundle a SAS definition bundle consists of key vault SAS definition details plus its -// attributes. -type SasDefinitionBundle struct { - autorest.Response `json:"-"` - // ID - READ-ONLY; The SAS definition id. - ID *string `json:"id,omitempty"` - // SecretID - READ-ONLY; Storage account SAS definition secret id. - SecretID *string `json:"sid,omitempty"` - // TemplateURI - READ-ONLY; The SAS definition token template signed with an arbitrary key. Tokens created according to the SAS definition will have the same properties as the template. - TemplateURI *string `json:"templateUri,omitempty"` - // SasType - READ-ONLY; The type of SAS token the SAS definition will create. Possible values include: 'Account', 'Service' - SasType SasTokenType `json:"sasType,omitempty"` - // ValidityPeriod - READ-ONLY; The validity period of SAS tokens created according to the SAS definition. - ValidityPeriod *string `json:"validityPeriod,omitempty"` - // Attributes - READ-ONLY; The SAS definition attributes. - Attributes *SasDefinitionAttributes `json:"attributes,omitempty"` - // Tags - READ-ONLY; Application specific metadata in the form of key-value pairs - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for SasDefinitionBundle. -func (sdb SasDefinitionBundle) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// SasDefinitionCreateParameters the SAS definition create parameters. -type SasDefinitionCreateParameters struct { - // TemplateURI - The SAS definition token template signed with an arbitrary key. Tokens created according to the SAS definition will have the same properties as the template. - TemplateURI *string `json:"templateUri,omitempty"` - // SasType - The type of SAS token the SAS definition will create. Possible values include: 'Account', 'Service' - SasType SasTokenType `json:"sasType,omitempty"` - // ValidityPeriod - The validity period of SAS tokens created according to the SAS definition. - ValidityPeriod *string `json:"validityPeriod,omitempty"` - // SasDefinitionAttributes - The attributes of the SAS definition. - SasDefinitionAttributes *SasDefinitionAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for SasDefinitionCreateParameters. 
-func (sdcp SasDefinitionCreateParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if sdcp.TemplateURI != nil { - objectMap["templateUri"] = sdcp.TemplateURI - } - if sdcp.SasType != "" { - objectMap["sasType"] = sdcp.SasType - } - if sdcp.ValidityPeriod != nil { - objectMap["validityPeriod"] = sdcp.ValidityPeriod - } - if sdcp.SasDefinitionAttributes != nil { - objectMap["attributes"] = sdcp.SasDefinitionAttributes - } - if sdcp.Tags != nil { - objectMap["tags"] = sdcp.Tags - } - return json.Marshal(objectMap) -} - -// SasDefinitionItem the SAS definition item containing storage SAS definition metadata. -type SasDefinitionItem struct { - // ID - READ-ONLY; The storage SAS identifier. - ID *string `json:"id,omitempty"` - // SecretID - READ-ONLY; The storage account SAS definition secret id. - SecretID *string `json:"sid,omitempty"` - // Attributes - READ-ONLY; The SAS definition management attributes. - Attributes *SasDefinitionAttributes `json:"attributes,omitempty"` - // Tags - READ-ONLY; Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for SasDefinitionItem. -func (sdi SasDefinitionItem) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// SasDefinitionListResult the storage account SAS definition list result. -type SasDefinitionListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of SAS definitions along with a link to the next page of SAS definitions. - Value *[]SasDefinitionItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of SAS definitions. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for SasDefinitionListResult. -func (sdlr SasDefinitionListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// SasDefinitionListResultIterator provides access to a complete listing of SasDefinitionItem values. -type SasDefinitionListResultIterator struct { - i int - page SasDefinitionListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *SasDefinitionListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/SasDefinitionListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *SasDefinitionListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter SasDefinitionListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. 
-func (iter SasDefinitionListResultIterator) Response() SasDefinitionListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter SasDefinitionListResultIterator) Value() SasDefinitionItem { - if !iter.page.NotDone() { - return SasDefinitionItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the SasDefinitionListResultIterator type. -func NewSasDefinitionListResultIterator(page SasDefinitionListResultPage) SasDefinitionListResultIterator { - return SasDefinitionListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (sdlr SasDefinitionListResult) IsEmpty() bool { - return sdlr.Value == nil || len(*sdlr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (sdlr SasDefinitionListResult) hasNextLink() bool { - return sdlr.NextLink != nil && len(*sdlr.NextLink) != 0 -} - -// sasDefinitionListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (sdlr SasDefinitionListResult) sasDefinitionListResultPreparer(ctx context.Context) (*http.Request, error) { - if !sdlr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(sdlr.NextLink))) -} - -// SasDefinitionListResultPage contains a page of SasDefinitionItem values. -type SasDefinitionListResultPage struct { - fn func(context.Context, SasDefinitionListResult) (SasDefinitionListResult, error) - sdlr SasDefinitionListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *SasDefinitionListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/SasDefinitionListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.sdlr) - if err != nil { - return err - } - page.sdlr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *SasDefinitionListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page SasDefinitionListResultPage) NotDone() bool { - return !page.sdlr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page SasDefinitionListResultPage) Response() SasDefinitionListResult { - return page.sdlr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page SasDefinitionListResultPage) Values() []SasDefinitionItem { - if page.sdlr.IsEmpty() { - return nil - } - return *page.sdlr.Value -} - -// Creates a new instance of the SasDefinitionListResultPage type. 
-func NewSasDefinitionListResultPage(cur SasDefinitionListResult, getNextPage func(context.Context, SasDefinitionListResult) (SasDefinitionListResult, error)) SasDefinitionListResultPage { - return SasDefinitionListResultPage{ - fn: getNextPage, - sdlr: cur, - } -} - -// SasDefinitionUpdateParameters the SAS definition update parameters. -type SasDefinitionUpdateParameters struct { - // TemplateURI - The SAS definition token template signed with an arbitrary key. Tokens created according to the SAS definition will have the same properties as the template. - TemplateURI *string `json:"templateUri,omitempty"` - // SasType - The type of SAS token the SAS definition will create. Possible values include: 'Account', 'Service' - SasType SasTokenType `json:"sasType,omitempty"` - // ValidityPeriod - The validity period of SAS tokens created according to the SAS definition. - ValidityPeriod *string `json:"validityPeriod,omitempty"` - // SasDefinitionAttributes - The attributes of the SAS definition. - SasDefinitionAttributes *SasDefinitionAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for SasDefinitionUpdateParameters. -func (sdup SasDefinitionUpdateParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if sdup.TemplateURI != nil { - objectMap["templateUri"] = sdup.TemplateURI - } - if sdup.SasType != "" { - objectMap["sasType"] = sdup.SasType - } - if sdup.ValidityPeriod != nil { - objectMap["validityPeriod"] = sdup.ValidityPeriod - } - if sdup.SasDefinitionAttributes != nil { - objectMap["attributes"] = sdup.SasDefinitionAttributes - } - if sdup.Tags != nil { - objectMap["tags"] = sdup.Tags - } - return json.Marshal(objectMap) -} - -// SecretAttributes the secret management attributes. -type SecretAttributes struct { - // RecoverableDays - READ-ONLY; softDelete data retention days. Value should be >=7 and <=90 when softDelete enabled, otherwise 0. - RecoverableDays *int32 `json:"recoverableDays,omitempty"` - // RecoveryLevel - READ-ONLY; Reflects the deletion recovery level currently in effect for secrets in the current vault. If it contains 'Purgeable', the secret can be permanently deleted by a privileged user; otherwise, only the system can purge the secret, at the end of the retention interval. Possible values include: 'Purgeable', 'RecoverablePurgeable', 'Recoverable', 'RecoverableProtectedSubscription', 'CustomizedRecoverablePurgeable', 'CustomizedRecoverable', 'CustomizedRecoverableProtectedSubscription' - RecoveryLevel DeletionRecoveryLevel `json:"recoveryLevel,omitempty"` - // Enabled - Determines whether the object is enabled. - Enabled *bool `json:"enabled,omitempty"` - // NotBefore - Not before date in UTC. - NotBefore *date.UnixTime `json:"nbf,omitempty"` - // Expires - Expiry date in UTC. - Expires *date.UnixTime `json:"exp,omitempty"` - // Created - READ-ONLY; Creation time in UTC. - Created *date.UnixTime `json:"created,omitempty"` - // Updated - READ-ONLY; Last updated time in UTC. - Updated *date.UnixTime `json:"updated,omitempty"` -} - -// MarshalJSON is the custom marshaler for SecretAttributes. 
-func (sa SecretAttributes) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if sa.Enabled != nil { - objectMap["enabled"] = sa.Enabled - } - if sa.NotBefore != nil { - objectMap["nbf"] = sa.NotBefore - } - if sa.Expires != nil { - objectMap["exp"] = sa.Expires - } - return json.Marshal(objectMap) -} - -// SecretBundle a secret consisting of a value, id and its attributes. -type SecretBundle struct { - autorest.Response `json:"-"` - // Value - The secret value. - Value *string `json:"value,omitempty"` - // ID - The secret id. - ID *string `json:"id,omitempty"` - // ContentType - The content type of the secret. - ContentType *string `json:"contentType,omitempty"` - // Attributes - The secret management attributes. - Attributes *SecretAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // Kid - READ-ONLY; If this is a secret backing a KV certificate, then this field specifies the corresponding key backing the KV certificate. - Kid *string `json:"kid,omitempty"` - // Managed - READ-ONLY; True if the secret's lifetime is managed by key vault. If this is a secret backing a certificate, then managed will be true. - Managed *bool `json:"managed,omitempty"` -} - -// MarshalJSON is the custom marshaler for SecretBundle. -func (sb SecretBundle) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if sb.Value != nil { - objectMap["value"] = sb.Value - } - if sb.ID != nil { - objectMap["id"] = sb.ID - } - if sb.ContentType != nil { - objectMap["contentType"] = sb.ContentType - } - if sb.Attributes != nil { - objectMap["attributes"] = sb.Attributes - } - if sb.Tags != nil { - objectMap["tags"] = sb.Tags - } - return json.Marshal(objectMap) -} - -// SecretItem the secret item containing secret metadata. -type SecretItem struct { - // ID - Secret identifier. - ID *string `json:"id,omitempty"` - // Attributes - The secret management attributes. - Attributes *SecretAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // ContentType - Type of the secret value such as a password. - ContentType *string `json:"contentType,omitempty"` - // Managed - READ-ONLY; True if the secret's lifetime is managed by key vault. If this is a key backing a certificate, then managed will be true. - Managed *bool `json:"managed,omitempty"` -} - -// MarshalJSON is the custom marshaler for SecretItem. -func (si SecretItem) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if si.ID != nil { - objectMap["id"] = si.ID - } - if si.Attributes != nil { - objectMap["attributes"] = si.Attributes - } - if si.Tags != nil { - objectMap["tags"] = si.Tags - } - if si.ContentType != nil { - objectMap["contentType"] = si.ContentType - } - return json.Marshal(objectMap) -} - -// SecretListResult the secret list result. -type SecretListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of secrets in the key vault along with a link to the next page of secrets. - Value *[]SecretItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of secrets. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for SecretListResult. 
-func (slr SecretListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// SecretListResultIterator provides access to a complete listing of SecretItem values. -type SecretListResultIterator struct { - i int - page SecretListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *SecretListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/SecretListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *SecretListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter SecretListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter SecretListResultIterator) Response() SecretListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter SecretListResultIterator) Value() SecretItem { - if !iter.page.NotDone() { - return SecretItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the SecretListResultIterator type. -func NewSecretListResultIterator(page SecretListResultPage) SecretListResultIterator { - return SecretListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (slr SecretListResult) IsEmpty() bool { - return slr.Value == nil || len(*slr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (slr SecretListResult) hasNextLink() bool { - return slr.NextLink != nil && len(*slr.NextLink) != 0 -} - -// secretListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (slr SecretListResult) secretListResultPreparer(ctx context.Context) (*http.Request, error) { - if !slr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(slr.NextLink))) -} - -// SecretListResultPage contains a page of SecretItem values. -type SecretListResultPage struct { - fn func(context.Context, SecretListResult) (SecretListResult, error) - slr SecretListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. 
-func (page *SecretListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/SecretListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.slr) - if err != nil { - return err - } - page.slr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *SecretListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page SecretListResultPage) NotDone() bool { - return !page.slr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page SecretListResultPage) Response() SecretListResult { - return page.slr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page SecretListResultPage) Values() []SecretItem { - if page.slr.IsEmpty() { - return nil - } - return *page.slr.Value -} - -// Creates a new instance of the SecretListResultPage type. -func NewSecretListResultPage(cur SecretListResult, getNextPage func(context.Context, SecretListResult) (SecretListResult, error)) SecretListResultPage { - return SecretListResultPage{ - fn: getNextPage, - slr: cur, - } -} - -// SecretProperties properties of the key backing a certificate. -type SecretProperties struct { - // ContentType - The media type (MIME type). - ContentType *string `json:"contentType,omitempty"` -} - -// SecretRestoreParameters the secret restore parameters. -type SecretRestoreParameters struct { - // SecretBundleBackup - The backup blob associated with a secret bundle. (a URL-encoded base64 string) - SecretBundleBackup *string `json:"value,omitempty"` -} - -// SecretSetParameters the secret set parameters. -type SecretSetParameters struct { - // Value - The value of the secret. - Value *string `json:"value,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // ContentType - Type of the secret value such as a password. - ContentType *string `json:"contentType,omitempty"` - // SecretAttributes - The secret management attributes. - SecretAttributes *SecretAttributes `json:"attributes,omitempty"` -} - -// MarshalJSON is the custom marshaler for SecretSetParameters. -func (ssp SecretSetParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ssp.Value != nil { - objectMap["value"] = ssp.Value - } - if ssp.Tags != nil { - objectMap["tags"] = ssp.Tags - } - if ssp.ContentType != nil { - objectMap["contentType"] = ssp.ContentType - } - if ssp.SecretAttributes != nil { - objectMap["attributes"] = ssp.SecretAttributes - } - return json.Marshal(objectMap) -} - -// SecretUpdateParameters the secret update parameters. -type SecretUpdateParameters struct { - // ContentType - Type of the secret value such as a password. - ContentType *string `json:"contentType,omitempty"` - // SecretAttributes - The secret management attributes. 
- SecretAttributes *SecretAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for SecretUpdateParameters. -func (sup SecretUpdateParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if sup.ContentType != nil { - objectMap["contentType"] = sup.ContentType - } - if sup.SecretAttributes != nil { - objectMap["attributes"] = sup.SecretAttributes - } - if sup.Tags != nil { - objectMap["tags"] = sup.Tags - } - return json.Marshal(objectMap) -} - -// StorageAccountAttributes the storage account management attributes. -type StorageAccountAttributes struct { - // Enabled - the enabled state of the object. - Enabled *bool `json:"enabled,omitempty"` - // Created - READ-ONLY; Creation time in UTC. - Created *date.UnixTime `json:"created,omitempty"` - // Updated - READ-ONLY; Last updated time in UTC. - Updated *date.UnixTime `json:"updated,omitempty"` - // RecoverableDays - READ-ONLY; softDelete data retention days. Value should be >=7 and <=90 when softDelete enabled, otherwise 0. - RecoverableDays *int32 `json:"recoverableDays,omitempty"` - // RecoveryLevel - READ-ONLY; Reflects the deletion recovery level currently in effect for storage accounts in the current vault. If it contains 'Purgeable' the storage account can be permanently deleted by a privileged user; otherwise, only the system can purge the storage account, at the end of the retention interval. Possible values include: 'Purgeable', 'RecoverablePurgeable', 'Recoverable', 'RecoverableProtectedSubscription', 'CustomizedRecoverablePurgeable', 'CustomizedRecoverable', 'CustomizedRecoverableProtectedSubscription' - RecoveryLevel DeletionRecoveryLevel `json:"recoveryLevel,omitempty"` -} - -// MarshalJSON is the custom marshaler for StorageAccountAttributes. -func (saa StorageAccountAttributes) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if saa.Enabled != nil { - objectMap["enabled"] = saa.Enabled - } - return json.Marshal(objectMap) -} - -// StorageAccountCreateParameters the storage account create parameters. -type StorageAccountCreateParameters struct { - // ResourceID - Storage account resource id. - ResourceID *string `json:"resourceId,omitempty"` - // ActiveKeyName - Current active storage account key name. - ActiveKeyName *string `json:"activeKeyName,omitempty"` - // AutoRegenerateKey - whether keyvault should manage the storage account for the user. - AutoRegenerateKey *bool `json:"autoRegenerateKey,omitempty"` - // RegenerationPeriod - The key regeneration time duration specified in ISO-8601 format. - RegenerationPeriod *string `json:"regenerationPeriod,omitempty"` - // StorageAccountAttributes - The attributes of the storage account. - StorageAccountAttributes *StorageAccountAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for StorageAccountCreateParameters. 
-func (sacp StorageAccountCreateParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if sacp.ResourceID != nil { - objectMap["resourceId"] = sacp.ResourceID - } - if sacp.ActiveKeyName != nil { - objectMap["activeKeyName"] = sacp.ActiveKeyName - } - if sacp.AutoRegenerateKey != nil { - objectMap["autoRegenerateKey"] = sacp.AutoRegenerateKey - } - if sacp.RegenerationPeriod != nil { - objectMap["regenerationPeriod"] = sacp.RegenerationPeriod - } - if sacp.StorageAccountAttributes != nil { - objectMap["attributes"] = sacp.StorageAccountAttributes - } - if sacp.Tags != nil { - objectMap["tags"] = sacp.Tags - } - return json.Marshal(objectMap) -} - -// StorageAccountItem the storage account item containing storage account metadata. -type StorageAccountItem struct { - // ID - READ-ONLY; Storage identifier. - ID *string `json:"id,omitempty"` - // ResourceID - READ-ONLY; Storage account resource Id. - ResourceID *string `json:"resourceId,omitempty"` - // Attributes - READ-ONLY; The storage account management attributes. - Attributes *StorageAccountAttributes `json:"attributes,omitempty"` - // Tags - READ-ONLY; Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for StorageAccountItem. -func (sai StorageAccountItem) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// StorageAccountRegenerteKeyParameters the storage account key regenerate parameters. -type StorageAccountRegenerteKeyParameters struct { - // KeyName - The storage account key name. - KeyName *string `json:"keyName,omitempty"` -} - -// StorageAccountUpdateParameters the storage account update parameters. -type StorageAccountUpdateParameters struct { - // ActiveKeyName - The current active storage account key name. - ActiveKeyName *string `json:"activeKeyName,omitempty"` - // AutoRegenerateKey - whether keyvault should manage the storage account for the user. - AutoRegenerateKey *bool `json:"autoRegenerateKey,omitempty"` - // RegenerationPeriod - The key regeneration time duration specified in ISO-8601 format. - RegenerationPeriod *string `json:"regenerationPeriod,omitempty"` - // StorageAccountAttributes - The attributes of the storage account. - StorageAccountAttributes *StorageAccountAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for StorageAccountUpdateParameters. -func (saup StorageAccountUpdateParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if saup.ActiveKeyName != nil { - objectMap["activeKeyName"] = saup.ActiveKeyName - } - if saup.AutoRegenerateKey != nil { - objectMap["autoRegenerateKey"] = saup.AutoRegenerateKey - } - if saup.RegenerationPeriod != nil { - objectMap["regenerationPeriod"] = saup.RegenerationPeriod - } - if saup.StorageAccountAttributes != nil { - objectMap["attributes"] = saup.StorageAccountAttributes - } - if saup.Tags != nil { - objectMap["tags"] = saup.Tags - } - return json.Marshal(objectMap) -} - -// StorageBundle a Storage account bundle consists of key vault storage account details plus its -// attributes. -type StorageBundle struct { - autorest.Response `json:"-"` - // ID - READ-ONLY; The storage account id. - ID *string `json:"id,omitempty"` - // ResourceID - READ-ONLY; The storage account resource id. 
- ResourceID *string `json:"resourceId,omitempty"` - // ActiveKeyName - READ-ONLY; The current active storage account key name. - ActiveKeyName *string `json:"activeKeyName,omitempty"` - // AutoRegenerateKey - READ-ONLY; whether keyvault should manage the storage account for the user. - AutoRegenerateKey *bool `json:"autoRegenerateKey,omitempty"` - // RegenerationPeriod - READ-ONLY; The key regeneration time duration specified in ISO-8601 format. - RegenerationPeriod *string `json:"regenerationPeriod,omitempty"` - // Attributes - READ-ONLY; The storage account attributes. - Attributes *StorageAccountAttributes `json:"attributes,omitempty"` - // Tags - READ-ONLY; Application specific metadata in the form of key-value pairs - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for StorageBundle. -func (sb StorageBundle) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// StorageListResult the storage accounts list result. -type StorageListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of storage accounts in the key vault along with a link to the next page of storage accounts. - Value *[]StorageAccountItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of storage accounts. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for StorageListResult. -func (slr StorageListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// StorageListResultIterator provides access to a complete listing of StorageAccountItem values. -type StorageListResultIterator struct { - i int - page StorageListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *StorageListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/StorageListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *StorageListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter StorageListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter StorageListResultIterator) Response() StorageListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. 
-func (iter StorageListResultIterator) Value() StorageAccountItem { - if !iter.page.NotDone() { - return StorageAccountItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the StorageListResultIterator type. -func NewStorageListResultIterator(page StorageListResultPage) StorageListResultIterator { - return StorageListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (slr StorageListResult) IsEmpty() bool { - return slr.Value == nil || len(*slr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (slr StorageListResult) hasNextLink() bool { - return slr.NextLink != nil && len(*slr.NextLink) != 0 -} - -// storageListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (slr StorageListResult) storageListResultPreparer(ctx context.Context) (*http.Request, error) { - if !slr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(slr.NextLink))) -} - -// StorageListResultPage contains a page of StorageAccountItem values. -type StorageListResultPage struct { - fn func(context.Context, StorageListResult) (StorageListResult, error) - slr StorageListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *StorageListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/StorageListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.slr) - if err != nil { - return err - } - page.slr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *StorageListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page StorageListResultPage) NotDone() bool { - return !page.slr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page StorageListResultPage) Response() StorageListResult { - return page.slr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page StorageListResultPage) Values() []StorageAccountItem { - if page.slr.IsEmpty() { - return nil - } - return *page.slr.Value -} - -// Creates a new instance of the StorageListResultPage type. -func NewStorageListResultPage(cur StorageListResult, getNextPage func(context.Context, StorageListResult) (StorageListResult, error)) StorageListResultPage { - return StorageListResultPage{ - fn: getNextPage, - slr: cur, - } -} - -// StorageRestoreParameters the secret restore parameters. -type StorageRestoreParameters struct { - // StorageBundleBackup - The backup blob associated with a storage account. 
(a URL-encoded base64 string) - StorageBundleBackup *string `json:"value,omitempty"` -} - -// SubjectAlternativeNames the subject alternate names of a X509 object. -type SubjectAlternativeNames struct { - // Emails - Email addresses. - Emails *[]string `json:"emails,omitempty"` - // DNSNames - Domain names. - DNSNames *[]string `json:"dns_names,omitempty"` - // Upns - User principal names. - Upns *[]string `json:"upns,omitempty"` -} - -// Trigger a condition to be satisfied for an action to be executed. -type Trigger struct { - // LifetimePercentage - Percentage of lifetime at which to trigger. Value should be between 1 and 99. - LifetimePercentage *int32 `json:"lifetime_percentage,omitempty"` - // DaysBeforeExpiry - Days before expiry to attempt renewal. Value should be between 1 and validity_in_months multiplied by 27. If validity_in_months is 36, then value should be between 1 and 972 (36 * 27). - DaysBeforeExpiry *int32 `json:"days_before_expiry,omitempty"` -} - -// X509CertificateProperties properties of the X509 component of a certificate. -type X509CertificateProperties struct { - // Subject - The subject name. Should be a valid X509 distinguished Name. - Subject *string `json:"subject,omitempty"` - // Ekus - The enhanced key usage. - Ekus *[]string `json:"ekus,omitempty"` - // SubjectAlternativeNames - The subject alternative names. - SubjectAlternativeNames *SubjectAlternativeNames `json:"sans,omitempty"` - // KeyUsage - List of key usages. - KeyUsage *[]KeyUsageType `json:"key_usage,omitempty"` - // ValidityInMonths - The duration that the certificate is valid in months. - ValidityInMonths *int32 `json:"validity_months,omitempty"` -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/version.go deleted file mode 100644 index 60143005..00000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.1/keyvault/version.go +++ /dev/null @@ -1,19 +0,0 @@ -package keyvault - -import "github.com/Azure/azure-sdk-for-go/version" - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -// UserAgent returns the UserAgent string to use when sending http.Requests. -func UserAgent() string { - return "Azure-SDK-For-Go/" + Version() + " keyvault/7.1" -} - -// Version returns the semantic version (see http://semver.org) of the client. -func Version() string { - return version.Number -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/version/version.go b/vendor/github.com/Azure/azure-sdk-for-go/version/version.go index f25ee491..bcfbb15c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/version/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/version/version.go @@ -4,4 +4,4 @@ package version // Licensed under the MIT License. See License.txt in the project root for license information. // Number contains the semantic version of this SDK. 
-const Number = "v67.3.0" +const Number = "v68.0.0" diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go index 1a9c8ab5..2a24ab80 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go @@ -127,6 +127,9 @@ type TokenRefreshCallback func(Token) error // TokenRefresh is a type representing a custom callback to refresh a token type TokenRefresh func(ctx context.Context, resource string) (*Token, error) +// JWTCallback is the type representing callback that will be called to get the federated OIDC JWT +type JWTCallback func() (string, error) + // Token encapsulates the access token used to authorize Azure requests. // https://docs.microsoft.com/en-us/azure/active-directory/develop/v1-oauth2-client-creds-grant-flow#service-to-service-access-token-response type Token struct { @@ -177,7 +180,7 @@ func (t Token) WillExpireIn(d time.Duration) bool { return !t.Expires().After(time.Now().Add(d)) } -//OAuthToken return the current access token +// OAuthToken return the current access token func (t *Token) OAuthToken() string { return t.AccessToken } @@ -367,14 +370,18 @@ func (secret ServicePrincipalAuthorizationCodeSecret) MarshalJSON() ([]byte, err // ServicePrincipalFederatedSecret implements ServicePrincipalSecret for Federated JWTs. type ServicePrincipalFederatedSecret struct { - jwt string + jwtCallback JWTCallback } // SetAuthenticationValues is a method of the interface ServicePrincipalSecret. // It will populate the form submitted during OAuth Token Acquisition using a JWT signed by an OIDC issuer. -func (secret *ServicePrincipalFederatedSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { +func (secret *ServicePrincipalFederatedSecret) SetAuthenticationValues(_ *ServicePrincipalToken, v *url.Values) error { + jwt, err := secret.jwtCallback() + if err != nil { + return err + } - v.Set("client_assertion", secret.jwt) + v.Set("client_assertion", jwt) v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") return nil } @@ -687,6 +694,8 @@ func NewServicePrincipalTokenFromAuthorizationCode(oauthConfig OAuthConfig, clie } // NewServicePrincipalTokenFromFederatedToken creates a ServicePrincipalToken from the supplied federated OIDC JWT. +// +// Deprecated: Use NewServicePrincipalTokenFromFederatedTokenWithCallback to refresh jwt dynamically. func NewServicePrincipalTokenFromFederatedToken(oauthConfig OAuthConfig, clientID string, jwt string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { if err := validateOAuthConfig(oauthConfig); err != nil { return nil, err @@ -700,12 +709,37 @@ func NewServicePrincipalTokenFromFederatedToken(oauthConfig OAuthConfig, clientI if jwt == "" { return nil, fmt.Errorf("parameter 'jwt' cannot be empty") } + return NewServicePrincipalTokenFromFederatedTokenCallback( + oauthConfig, + clientID, + func() (string, error) { + return jwt, nil + }, + resource, + callbacks..., + ) +} + +// NewServicePrincipalTokenFromFederatedTokenCallback creates a ServicePrincipalToken from the supplied federated OIDC JWTCallback. 
+func NewServicePrincipalTokenFromFederatedTokenCallback(oauthConfig OAuthConfig, clientID string, jwtCallback JWTCallback, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if jwtCallback == nil { + return nil, fmt.Errorf("parameter 'jwtCallback' cannot be empty") + } return NewServicePrincipalTokenWithSecret( oauthConfig, clientID, resource, &ServicePrincipalFederatedSecret{ - jwt: jwt, + jwtCallback: jwtCallback, }, callbacks..., ) diff --git a/vendor/github.com/Azure/go-autorest/autorest/autorest.go b/vendor/github.com/Azure/go-autorest/autorest/autorest.go index aafdf021..211c98d1 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/autorest.go +++ b/vendor/github.com/Azure/go-autorest/autorest/autorest.go @@ -6,33 +6,33 @@ generated Go code. The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, and Responding. A typical pattern is: - req, err := Prepare(&http.Request{}, - token.WithAuthorization()) + req, err := Prepare(&http.Request{}, + token.WithAuthorization()) - resp, err := Send(req, - WithLogging(logger), - DoErrorIfStatusCode(http.StatusInternalServerError), - DoCloseIfError(), - DoRetryForAttempts(5, time.Second)) + resp, err := Send(req, + WithLogging(logger), + DoErrorIfStatusCode(http.StatusInternalServerError), + DoCloseIfError(), + DoRetryForAttempts(5, time.Second)) - err = Respond(resp, - ByDiscardingBody(), - ByClosing()) + err = Respond(resp, + ByDiscardingBody(), + ByClosing()) Each phase relies on decorators to modify and / or manage processing. Decorators may first modify and then pass the data along, pass the data first and then modify the result, or wrap themselves around passing the data (such as a logger might do). Decorators run in the order provided. For example, the following: - req, err := Prepare(&http.Request{}, - WithBaseURL("https://microsoft.com/"), - WithPath("a"), - WithPath("b"), - WithPath("c")) + req, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + WithPath("a"), + WithPath("b"), + WithPath("c")) will set the URL to: - https://microsoft.com/a/b/c + https://microsoft.com/a/b/c Preparers and Responders may be shared and re-used (assuming the underlying decorators support sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/README.md b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/README.md new file mode 100644 index 00000000..05bef8a8 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/README.md @@ -0,0 +1,152 @@ +# NOTE: This module will go out of support by March 31, 2023. For authenticating with Azure AD, use module [azidentity](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity) instead. For help migrating from `auth` to `azidentiy` please consult the [migration guide](https://aka.ms/azsdk/go/identity/migration). General information about the retirement of this and other legacy modules can be found [here](https://azure.microsoft.com/updates/support-for-azure-sdk-libraries-that-do-not-conform-to-our-current-azure-sdk-guidelines-will-be-retired-as-of-31-march-2023/). 
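
A minimal, hypothetical sketch (editorial illustration, not part of this patch or of the vendored files) of how a caller might use the callback-based federated credential added in the `token.go` hunk above. Note the deprecation comment refers to `NewServicePrincipalTokenFromFederatedTokenWithCallback`, while the function the patch actually adds is `NewServicePrincipalTokenFromFederatedTokenCallback`. The authority URL, resource URL, and the `AZURE_TENANT_ID` / `AZURE_CLIENT_ID` / `AZURE_FEDERATED_TOKEN_FILE` environment variables below are assumed workload-identity conventions, not values taken from this diff.

```go
package main

import (
	"fmt"
	"os"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	// Assumed values: the public Azure AD authority plus workload-identity style
	// environment variables (AZURE_TENANT_ID, AZURE_CLIENT_ID, AZURE_FEDERATED_TOKEN_FILE).
	oauthConfig, err := adal.NewOAuthConfig("https://login.microsoftonline.com/", os.Getenv("AZURE_TENANT_ID"))
	if err != nil {
		panic(err)
	}

	// The callback is invoked each time a token is acquired or refreshed, so a
	// rotated projected token file is re-read instead of a single JWT going stale.
	jwtCallback := func() (string, error) {
		b, err := os.ReadFile(os.Getenv("AZURE_FEDERATED_TOKEN_FILE"))
		if err != nil {
			return "", err
		}
		return string(b), nil
	}

	spt, err := adal.NewServicePrincipalTokenFromFederatedTokenCallback(
		*oauthConfig,
		os.Getenv("AZURE_CLIENT_ID"),
		jwtCallback,
		"https://management.azure.com/",
	)
	if err != nil {
		panic(err)
	}

	if err := spt.EnsureFresh(); err != nil {
		panic(err)
	}
	fmt.Println("acquired token of length", len(spt.OAuthToken()))
}
```

This is also the reason the older `NewServicePrincipalTokenFromFederatedToken` is marked deprecated in the hunk above: it captures one static JWT at construction time, whereas the callback form lets the JWT be supplied dynamically on each refresh.
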
+ +## Authentication + +Typical SDK operations must be authenticated and authorized. The `autorest.Authorizer` +interface allows use of any auth style in requests, such as inserting an OAuth2 +Authorization header and bearer token received from Azure AD. + +The SDK itself provides a simple way to get an authorizer which first checks +for OAuth client credentials in environment variables and then falls back to +Azure's [Managed Service Identity]() when available, e.g. when on an Azure +VM. The following snippet from [the previous section](#use) demonstrates +this helper. + +```go +import "github.com/Azure/go-autorest/autorest/azure/auth" + +// create a VirtualNetworks client +vnetClient := network.NewVirtualNetworksClient("") + +// create an authorizer from env vars or Azure Managed Service Idenity +authorizer, err := auth.NewAuthorizerFromEnvironment() +if err != nil { + handle(err) +} + +vnetClient.Authorizer = authorizer + +// call the VirtualNetworks CreateOrUpdate API +vnetClient.CreateOrUpdate(context.Background(), +// ... +``` + +The following environment variables help determine authentication configuration: + +- `AZURE_ENVIRONMENT`: Specifies the Azure Environment to use. If not set, it + defaults to `AzurePublicCloud`. Not applicable to authentication with Managed + Service Identity (MSI). +- `AZURE_AD_RESOURCE`: Specifies the AAD resource ID to use. If not set, it + defaults to `ResourceManagerEndpoint` for operations with Azure Resource + Manager. You can also choose an alternate resource programmatically with + `auth.NewAuthorizerFromEnvironmentWithResource(resource string)`. + +### More Authentication Details + +The previous is the first and most recommended of several authentication +options offered by the SDK because it allows seamless use of both service +principals and [Azure Managed Service Identity][]. Other options are listed +below. + +> Note: If you need to create a new service principal, run `az ad sp create-for-rbac -n ""` in the +> [azure-cli](https://github.com/Azure/azure-cli). See [these +> docs](https://docs.microsoft.com/cli/azure/create-an-azure-service-principal-azure-cli?view=azure-cli-latest) +> for more info. Copy the new principal's ID, secret, and tenant ID for use in +> your app, or consider the `--sdk-auth` parameter for serialized output. + +[azure managed service identity]: https://docs.microsoft.com/azure/active-directory/msi-overview + +- The `auth.NewAuthorizerFromEnvironment()` described above creates an authorizer + from the first available of the following configuration: + + 1. **Client Credentials**: Azure AD Application ID and Secret. + + - `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate. + - `AZURE_CLIENT_ID`: Specifies the app client ID to use. + - `AZURE_CLIENT_SECRET`: Specifies the app secret to use. + + 2. **Client Certificate**: Azure AD Application ID and X.509 Certificate. + + - `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate. + - `AZURE_CLIENT_ID`: Specifies the app client ID to use. + - `AZURE_CERTIFICATE_PATH`: Specifies the certificate Path to use. + - `AZURE_CERTIFICATE_PASSWORD`: Specifies the certificate password to use. + + 3. **Resource Owner Password**: Azure AD User and Password. This grant type is *not + recommended*, use device login instead if you need interactive login. + + - `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate. + - `AZURE_CLIENT_ID`: Specifies the app client ID to use. + - `AZURE_USERNAME`: Specifies the username to use. 
+ - `AZURE_PASSWORD`: Specifies the password to use. + + 4. **Azure Managed Service Identity**: Delegate credential management to the + platform. Requires that code is running in Azure, e.g. on a VM. All + configuration is handled by Azure. See [Azure Managed Service + Identity](https://docs.microsoft.com/azure/active-directory/msi-overview) + for more details. + +- The `auth.NewAuthorizerFromFile()` method creates an authorizer using + credentials from an auth file created by the [Azure CLI][]. Follow these + steps to utilize: + + 1. Create a service principal and output an auth file using `az ad sp create-for-rbac --sdk-auth > client_credentials.json`. + 2. Set environment variable `AZURE_AUTH_LOCATION` to the path of the saved + output file. + 3. Use the authorizer returned by `auth.NewAuthorizerFromFile()` in your + client as described above. + +- The `auth.NewAuthorizerFromCLI()` method creates an authorizer which + uses [Azure CLI][] to obtain its credentials. + + The default audience being requested is `https://management.azure.com` (Azure ARM API). + To specify your own audience, export `AZURE_AD_RESOURCE` as an evironment variable. + This is read by `auth.NewAuthorizerFromCLI()` and passed to Azure CLI to acquire the access token. + + For example, to request an access token for Azure Key Vault, export + ``` + AZURE_AD_RESOURCE="https://vault.azure.net" + ``` + +- `auth.NewAuthorizerFromCLIWithResource(AUDIENCE_URL_OR_APPLICATION_ID)` - this method is self contained and does + not require exporting environment variables. For example, to request an access token for Azure Key Vault: + ``` + auth.NewAuthorizerFromCLIWithResource("https://vault.azure.net") + ``` + + To use `NewAuthorizerFromCLI()` or `NewAuthorizerFromCLIWithResource()`, follow these steps: + + 1. Install [Azure CLI v2.0.12](https://docs.microsoft.com/cli/azure/install-azure-cli) or later. Upgrade earlier versions. + 2. Use `az login` to sign in to Azure. + + If you receive an error, use `az account get-access-token` to verify access. + + If Azure CLI is not installed to the default directory, you may receive an error + reporting that `az` cannot be found. + Use the `AzureCLIPath` environment variable to define the Azure CLI installation folder. + + If you are signed in to Azure CLI using multiple accounts or your account has + access to multiple subscriptions, you need to specify the specific subscription + to be used. To do so, use: + + ``` + az account set --subscription + ``` + + To verify the current account settings, use: + + ``` + az account list + ``` + +[azure cli]: https://github.com/Azure/azure-cli + +- Finally, you can use OAuth's [Device Flow][] by calling + `auth.NewDeviceFlowConfig()` and extracting the Authorizer as follows: + + ```go + config := auth.NewDeviceFlowConfig(clientID, tenantID) + a, err := config.Authorizer() + ``` + +[device flow]: https://oauth.net/2/device-flow/ diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go index 2f1a9981..e97589dc 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go @@ -250,6 +250,17 @@ func NewAuthorizerFromFile(resourceBaseURI string) (autorest.Authorizer, error) if err != nil { return nil, err } + return settings.GetAuthorizer(resourceBaseURI) +} + +// GetAuthorizer create an Authorizer in the following order. +// 1. Client credentials +// 2. 
Client certificate +// resourceBaseURI - used to determine the resource type +func (settings FileSettings) GetAuthorizer(resourceBaseURI string) (autorest.Authorizer, error) { + if resourceBaseURI == "" { + resourceBaseURI = azure.PublicCloud.ServiceManagementEndpoint + } if a, err := settings.ClientCredentialsAuthorizer(resourceBaseURI); err == nil { return a, err } @@ -559,7 +570,7 @@ func NewDeviceFlowConfig(clientID string, tenantID string) DeviceFlowConfig { } } -//AuthorizerConfig provides an authorizer from the configuration provided. +// AuthorizerConfig provides an authorizer from the configuration provided. type AuthorizerConfig interface { Authorizer() (autorest.Authorizer, error) } diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go index 1328f176..868345db 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go @@ -214,7 +214,7 @@ func (r Resource) String() string { // See https://docs.microsoft.com/en-us/azure/azure-resource-manager/templates/template-functions-resource?tabs=json#resourceid. func ParseResourceID(resourceID string) (Resource, error) { - const resourceIDPatternText = `(?i)subscriptions/(.+)/resourceGroups/(.+)/providers/(.+?)/(.+?)/(.+)` + const resourceIDPatternText = `(?i)^/subscriptions/(.+)/resourceGroups/(.+)/providers/(.+?)/(.+?)/(.+)$` resourceIDPattern := regexp.MustCompile(resourceIDPatternText) match := resourceIDPattern.FindStringSubmatch(resourceID) diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/convert.go b/vendor/github.com/Azure/go-autorest/autorest/to/convert.go deleted file mode 100644 index 86694bd2..00000000 --- a/vendor/github.com/Azure/go-autorest/autorest/to/convert.go +++ /dev/null @@ -1,152 +0,0 @@ -/* -Package to provides helpers to ease working with pointer values of marshalled structures. -*/ -package to - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// String returns a string value for the passed string pointer. It returns the empty string if the -// pointer is nil. -func String(s *string) string { - if s != nil { - return *s - } - return "" -} - -// StringPtr returns a pointer to the passed string. -func StringPtr(s string) *string { - return &s -} - -// StringSlice returns a string slice value for the passed string slice pointer. It returns a nil -// slice if the pointer is nil. -func StringSlice(s *[]string) []string { - if s != nil { - return *s - } - return nil -} - -// StringSlicePtr returns a pointer to the passed string slice. -func StringSlicePtr(s []string) *[]string { - return &s -} - -// StringMap returns a map of strings built from the map of string pointers. The empty string is -// used for nil pointers. 
-func StringMap(msp map[string]*string) map[string]string { - ms := make(map[string]string, len(msp)) - for k, sp := range msp { - if sp != nil { - ms[k] = *sp - } else { - ms[k] = "" - } - } - return ms -} - -// StringMapPtr returns a pointer to a map of string pointers built from the passed map of strings. -func StringMapPtr(ms map[string]string) *map[string]*string { - msp := make(map[string]*string, len(ms)) - for k, s := range ms { - msp[k] = StringPtr(s) - } - return &msp -} - -// Bool returns a bool value for the passed bool pointer. It returns false if the pointer is nil. -func Bool(b *bool) bool { - if b != nil { - return *b - } - return false -} - -// BoolPtr returns a pointer to the passed bool. -func BoolPtr(b bool) *bool { - return &b -} - -// Int returns an int value for the passed int pointer. It returns 0 if the pointer is nil. -func Int(i *int) int { - if i != nil { - return *i - } - return 0 -} - -// IntPtr returns a pointer to the passed int. -func IntPtr(i int) *int { - return &i -} - -// Int32 returns an int value for the passed int pointer. It returns 0 if the pointer is nil. -func Int32(i *int32) int32 { - if i != nil { - return *i - } - return 0 -} - -// Int32Ptr returns a pointer to the passed int32. -func Int32Ptr(i int32) *int32 { - return &i -} - -// Int64 returns an int value for the passed int pointer. It returns 0 if the pointer is nil. -func Int64(i *int64) int64 { - if i != nil { - return *i - } - return 0 -} - -// Int64Ptr returns a pointer to the passed int64. -func Int64Ptr(i int64) *int64 { - return &i -} - -// Float32 returns an int value for the passed int pointer. It returns 0.0 if the pointer is nil. -func Float32(i *float32) float32 { - if i != nil { - return *i - } - return 0.0 -} - -// Float32Ptr returns a pointer to the passed float32. -func Float32Ptr(i float32) *float32 { - return &i -} - -// Float64 returns an int value for the passed int pointer. It returns 0.0 if the pointer is nil. -func Float64(i *float64) float64 { - if i != nil { - return *i - } - return 0.0 -} - -// Float64Ptr returns a pointer to the passed float64. -func Float64Ptr(i float64) *float64 { - return &i -} - -// ByteSlicePtr returns a pointer to the passed byte slice. -func ByteSlicePtr(b []byte) *[]byte { - return &b -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/to/go_mod_tidy_hack.go deleted file mode 100644 index b7310f6b..00000000 --- a/vendor/github.com/Azure/go-autorest/autorest/to/go_mod_tidy_hack.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build modhack - -package to - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// This file, and the github.com/Azure/go-autorest import, won't actually become part of -// the resultant binary. - -// Necessary for safely adding multi-module repo. 
-// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility.go b/vendor/github.com/Azure/go-autorest/autorest/utility.go index 3467b8fa..d35b3850 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/utility.go +++ b/vendor/github.com/Azure/go-autorest/autorest/utility.go @@ -60,9 +60,9 @@ func NewDecoder(encodedAs EncodedAs, r io.Reader) Decoder { // is especially useful if there is a chance the data will fail to decode. // encodedAs specifies the expected encoding, r provides the io.Reader to the data, and v // is the decoding destination. -func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (bytes.Buffer, error) { - b := bytes.Buffer{} - return b, NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v) +func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (b bytes.Buffer, err error) { + err = NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v) + return } // TeeReadCloser returns a ReadCloser that writes to w what it reads from rc. diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/error.go b/vendor/github.com/Azure/go-autorest/autorest/validation/error.go deleted file mode 100644 index fed156db..00000000 --- a/vendor/github.com/Azure/go-autorest/autorest/validation/error.go +++ /dev/null @@ -1,48 +0,0 @@ -package validation - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" -) - -// Error is the type that's returned when the validation of an APIs arguments constraints fails. -type Error struct { - // PackageType is the package type of the object emitting the error. For types, the value - // matches that produced the the '%T' format specifier of the fmt package. For other elements, - // such as functions, it is just the package name (e.g., "autorest"). - PackageType string - - // Method is the name of the method raising the error. - Method string - - // Message is the error message. - Message string -} - -// Error returns a string containing the details of the validation failure. -func (e Error) Error() string { - return fmt.Sprintf("%s#%s: Invalid input: %s", e.PackageType, e.Method, e.Message) -} - -// NewError creates a new Error object with the specified parameters. -// message is treated as a format string to which the optional args apply. 
-func NewError(packageType string, method string, message string, args ...interface{}) Error { - return Error{ - PackageType: packageType, - Method: method, - Message: fmt.Sprintf(message, args...), - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/validation/go_mod_tidy_hack.go deleted file mode 100644 index cf143629..00000000 --- a/vendor/github.com/Azure/go-autorest/autorest/validation/go_mod_tidy_hack.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build modhack - -package validation - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// This file, and the github.com/Azure/go-autorest import, won't actually become part of -// the resultant binary. - -// Necessary for safely adding multi-module repo. -// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go b/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go deleted file mode 100644 index ff41cfe0..00000000 --- a/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go +++ /dev/null @@ -1,406 +0,0 @@ -/* -Package validation provides methods for validating parameter value using reflection. -*/ -package validation - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "reflect" - "regexp" - "strings" -) - -// Disabled controls if parameter validation should be globally disabled. The default is false. -var Disabled bool - -// Constraint stores constraint name, target field name -// Rule and chain validations. -type Constraint struct { - - // Target field name for validation. - Target string - - // Constraint name e.g. minLength, MaxLength, Pattern, etc. - Name string - - // Rule for constraint e.g. greater than 10, less than 5 etc. - Rule interface{} - - // Chain Validations for struct type - Chain []Constraint -} - -// Validation stores parameter-wise validation. 
-type Validation struct { - TargetValue interface{} - Constraints []Constraint -} - -// Constraint list -const ( - Empty = "Empty" - Null = "Null" - ReadOnly = "ReadOnly" - Pattern = "Pattern" - MaxLength = "MaxLength" - MinLength = "MinLength" - MaxItems = "MaxItems" - MinItems = "MinItems" - MultipleOf = "MultipleOf" - UniqueItems = "UniqueItems" - InclusiveMaximum = "InclusiveMaximum" - ExclusiveMaximum = "ExclusiveMaximum" - ExclusiveMinimum = "ExclusiveMinimum" - InclusiveMinimum = "InclusiveMinimum" -) - -// Validate method validates constraints on parameter -// passed in validation array. -func Validate(m []Validation) error { - if Disabled { - return nil - } - for _, item := range m { - v := reflect.ValueOf(item.TargetValue) - for _, constraint := range item.Constraints { - var err error - switch v.Kind() { - case reflect.Ptr: - err = validatePtr(v, constraint) - case reflect.String: - err = validateString(v, constraint) - case reflect.Struct: - err = validateStruct(v, constraint) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - err = validateInt(v, constraint) - case reflect.Float32, reflect.Float64: - err = validateFloat(v, constraint) - case reflect.Array, reflect.Slice, reflect.Map: - err = validateArrayMap(v, constraint) - default: - err = createError(v, constraint, fmt.Sprintf("unknown type %v", v.Kind())) - } - - if err != nil { - return err - } - } - } - return nil -} - -func validateStruct(x reflect.Value, v Constraint, name ...string) error { - //Get field name from target name which is in format a.b.c - s := strings.Split(v.Target, ".") - f := x.FieldByName(s[len(s)-1]) - if isZero(f) { - return createError(x, v, fmt.Sprintf("field %q doesn't exist", v.Target)) - } - - return Validate([]Validation{ - { - TargetValue: getInterfaceValue(f), - Constraints: []Constraint{v}, - }, - }) -} - -func validatePtr(x reflect.Value, v Constraint) error { - if v.Name == ReadOnly { - if !x.IsNil() { - return createError(x.Elem(), v, "readonly parameter; must send as nil or empty in request") - } - return nil - } - if x.IsNil() { - return checkNil(x, v) - } - if v.Chain != nil { - return Validate([]Validation{ - { - TargetValue: getInterfaceValue(x.Elem()), - Constraints: v.Chain, - }, - }) - } - return nil -} - -func validateInt(x reflect.Value, v Constraint) error { - i := x.Int() - r, ok := toInt64(v.Rule) - if !ok { - return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule)) - } - switch v.Name { - case MultipleOf: - if i%r != 0 { - return createError(x, v, fmt.Sprintf("value must be a multiple of %v", r)) - } - case ExclusiveMinimum: - if i <= r { - return createError(x, v, fmt.Sprintf("value must be greater than %v", r)) - } - case ExclusiveMaximum: - if i >= r { - return createError(x, v, fmt.Sprintf("value must be less than %v", r)) - } - case InclusiveMinimum: - if i < r { - return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r)) - } - case InclusiveMaximum: - if i > r { - return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r)) - } - default: - return createError(x, v, fmt.Sprintf("constraint %v is not applicable for type integer", v.Name)) - } - return nil -} - -func validateFloat(x reflect.Value, v Constraint) error { - f := x.Float() - r, ok := v.Rule.(float64) - if !ok { - return createError(x, v, fmt.Sprintf("rule must be float value for %v constraint; got: %v", v.Name, v.Rule)) - } - switch v.Name { - case ExclusiveMinimum: - if f <= 
r { - return createError(x, v, fmt.Sprintf("value must be greater than %v", r)) - } - case ExclusiveMaximum: - if f >= r { - return createError(x, v, fmt.Sprintf("value must be less than %v", r)) - } - case InclusiveMinimum: - if f < r { - return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r)) - } - case InclusiveMaximum: - if f > r { - return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r)) - } - default: - return createError(x, v, fmt.Sprintf("constraint %s is not applicable for type float", v.Name)) - } - return nil -} - -func validateString(x reflect.Value, v Constraint) error { - s := x.String() - switch v.Name { - case Empty: - if len(s) == 0 { - return checkEmpty(x, v) - } - case Pattern: - reg, err := regexp.Compile(v.Rule.(string)) - if err != nil { - return createError(x, v, err.Error()) - } - if !reg.MatchString(s) { - return createError(x, v, fmt.Sprintf("value doesn't match pattern %v", v.Rule)) - } - case MaxLength: - if _, ok := v.Rule.(int); !ok { - return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule)) - } - if len(s) > v.Rule.(int) { - return createError(x, v, fmt.Sprintf("value length must be less than or equal to %v", v.Rule)) - } - case MinLength: - if _, ok := v.Rule.(int); !ok { - return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule)) - } - if len(s) < v.Rule.(int) { - return createError(x, v, fmt.Sprintf("value length must be greater than or equal to %v", v.Rule)) - } - case ReadOnly: - if len(s) > 0 { - return createError(reflect.ValueOf(s), v, "readonly parameter; must send as nil or empty in request") - } - default: - return createError(x, v, fmt.Sprintf("constraint %s is not applicable to string type", v.Name)) - } - - if v.Chain != nil { - return Validate([]Validation{ - { - TargetValue: getInterfaceValue(x), - Constraints: v.Chain, - }, - }) - } - return nil -} - -func validateArrayMap(x reflect.Value, v Constraint) error { - switch v.Name { - case Null: - if x.IsNil() { - return checkNil(x, v) - } - case Empty: - if x.IsNil() || x.Len() == 0 { - return checkEmpty(x, v) - } - case MaxItems: - if _, ok := v.Rule.(int); !ok { - return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.Name, v.Rule)) - } - if x.Len() > v.Rule.(int) { - return createError(x, v, fmt.Sprintf("maximum item limit is %v; got: %v", v.Rule, x.Len())) - } - case MinItems: - if _, ok := v.Rule.(int); !ok { - return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.Name, v.Rule)) - } - if x.Len() < v.Rule.(int) { - return createError(x, v, fmt.Sprintf("minimum item limit is %v; got: %v", v.Rule, x.Len())) - } - case UniqueItems: - if x.Kind() == reflect.Array || x.Kind() == reflect.Slice { - if !checkForUniqueInArray(x) { - return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.Target, x)) - } - } else if x.Kind() == reflect.Map { - if !checkForUniqueInMap(x) { - return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.Target, x)) - } - } else { - return createError(x, v, fmt.Sprintf("type must be array, slice or map for constraint %v; got: %v", v.Name, x.Kind())) - } - case ReadOnly: - if x.Len() != 0 { - return createError(x, v, "readonly parameter; must send as nil or empty in request") - } - case Pattern: - reg, err := regexp.Compile(v.Rule.(string)) - if err != nil { - return 
createError(x, v, err.Error()) - } - keys := x.MapKeys() - for _, k := range keys { - if !reg.MatchString(k.String()) { - return createError(k, v, fmt.Sprintf("map key doesn't match pattern %v", v.Rule)) - } - } - default: - return createError(x, v, fmt.Sprintf("constraint %v is not applicable to array, slice and map type", v.Name)) - } - - if v.Chain != nil { - return Validate([]Validation{ - { - TargetValue: getInterfaceValue(x), - Constraints: v.Chain, - }, - }) - } - return nil -} - -func checkNil(x reflect.Value, v Constraint) error { - if _, ok := v.Rule.(bool); !ok { - return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.Name, v.Rule)) - } - if v.Rule.(bool) { - return createError(x, v, "value can not be null; required parameter") - } - return nil -} - -func checkEmpty(x reflect.Value, v Constraint) error { - if _, ok := v.Rule.(bool); !ok { - return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.Name, v.Rule)) - } - - if v.Rule.(bool) { - return createError(x, v, "value can not be null or empty; required parameter") - } - return nil -} - -func checkForUniqueInArray(x reflect.Value) bool { - if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 { - return false - } - arrOfInterface := make([]interface{}, x.Len()) - - for i := 0; i < x.Len(); i++ { - arrOfInterface[i] = x.Index(i).Interface() - } - - m := make(map[interface{}]bool) - for _, val := range arrOfInterface { - if m[val] { - return false - } - m[val] = true - } - return true -} - -func checkForUniqueInMap(x reflect.Value) bool { - if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 { - return false - } - mapOfInterface := make(map[interface{}]interface{}, x.Len()) - - keys := x.MapKeys() - for _, k := range keys { - mapOfInterface[k.Interface()] = x.MapIndex(k).Interface() - } - - m := make(map[interface{}]bool) - for _, val := range mapOfInterface { - if m[val] { - return false - } - m[val] = true - } - return true -} - -func getInterfaceValue(x reflect.Value) interface{} { - if x.Kind() == reflect.Invalid { - return nil - } - return x.Interface() -} - -func isZero(x interface{}) bool { - return x == reflect.Zero(reflect.TypeOf(x)).Interface() -} - -func createError(x reflect.Value, v Constraint, err string) error { - return fmt.Errorf("autorest/validation: validation failed: parameter=%s constraint=%s value=%#v details: %s", - v.Target, v.Name, getInterfaceValue(x), err) -} - -func toInt64(v interface{}) (int64, bool) { - if i64, ok := v.(int64); ok { - return i64, true - } - // older generators emit max constants as int, so if int64 fails fall back to int - if i32, ok := v.(int); ok { - return int64(i32), true - } - return 0, false -} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/LICENSE b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/LICENSE new file mode 100644 index 00000000..3d8b93bc --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/LICENSE @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. 
+ + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache/cache.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache/cache.go new file mode 100644 index 00000000..19210883 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache/cache.go @@ -0,0 +1,54 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/* +Package cache allows third parties to implement external storage for caching token data +for distributed systems or multiple local applications access. + +The data stored and extracted will represent the entire cache. Therefore it is recommended +one msal instance per user. This data is considered opaque and there are no guarantees to +implementers on the format being passed. +*/ +package cache + +import "context" + +// Marshaler marshals data from an internal cache to bytes that can be stored. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// Unmarshaler unmarshals data from a storage medium into the internal cache, overwriting it. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// Serializer can serialize the cache to binary or from binary into the cache. +type Serializer interface { + Marshaler + Unmarshaler +} + +// ExportHints are suggestions for storing data. +type ExportHints struct { + // PartitionKey is a suggested key for partitioning the cache + PartitionKey string +} + +// ReplaceHints are suggestions for loading data. +type ReplaceHints struct { + // PartitionKey is a suggested key for partitioning the cache + PartitionKey string +} + +// ExportReplace exports and replaces in-memory cache data. It doesn't support nil Context or +// define the outcome of passing one. A Context without a timeout must receive a default timeout +// specified by the implementor. Retries must be implemented inside the implementation. +type ExportReplace interface { + // Replace replaces the cache with what is in external storage. Implementors should honor + // Context cancellations and return context.Canceled or context.DeadlineExceeded in those cases. + Replace(ctx context.Context, cache Unmarshaler, hints ReplaceHints) error + // Export writes the binary representation of the cache (cache.Marshal()) to external storage. + // This is considered opaque. Context cancellations should be honored as in Replace. 
+ Export(ctx context.Context, cache Marshaler, hints ExportHints) error +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go new file mode 100644 index 00000000..6612feb4 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go @@ -0,0 +1,685 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/* +Package confidential provides a client for authentication of "confidential" applications. +A "confidential" application is defined as an app that run on servers. They are considered +difficult to access and for that reason capable of keeping an application secret. +Confidential clients can hold configuration-time secrets. +*/ +package confidential + +import ( + "context" + "crypto" + "crypto/rsa" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "errors" + "fmt" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" +) + +/* +Design note: + +confidential.Client uses base.Client as an embedded type. base.Client statically assigns its attributes +during creation. As it doesn't have any pointers in it, anything borrowed from it, such as +Base.AuthParams is a copy that is free to be manipulated here. + +Duplicate Calls shared between public.Client and this package: +There is some duplicate call options provided here that are the same as in public.Client . This +is a design choices. Go proverb(https://www.youtube.com/watch?v=PAAkCSZUG1c&t=9m28s): +"a little copying is better than a little dependency". Yes, we could have another package with +shared options (fail). That divides like 2 options from all others which makes the user look +through more docs. We can have all clients in one package, but I think separate packages +here makes for better naming (public.Client vs client.PublicClient). So I chose a little +duplication. + +.Net People, Take note on X509: +This uses x509.Certificates and private keys. x509 does not store private keys. .Net +has some x509.Certificate2 thing that has private keys, but that is just some bullcrap that .Net +added, it doesn't exist in real life. As such I've put a PEM decoder into here. +*/ + +// TODO(msal): This should have example code for each method on client using Go's example doc framework. +// base usage details should be include in the package documentation. + +// AuthResult contains the results of one token acquisition operation. +// For details see https://aka.ms/msal-net-authenticationresult +type AuthResult = base.AuthResult + +type Account = shared.Account + +// CertFromPEM converts a PEM file (.pem or .key) for use with [NewCredFromCert]. 
The file +// must contain the public certificate and the private key. If a PEM block is encrypted and +// password is not an empty string, it attempts to decrypt the PEM blocks using the password. +// Multiple certs are due to certificate chaining for use cases like TLS that sign from root to leaf. +func CertFromPEM(pemData []byte, password string) ([]*x509.Certificate, crypto.PrivateKey, error) { + var certs []*x509.Certificate + var priv crypto.PrivateKey + for { + block, rest := pem.Decode(pemData) + if block == nil { + break + } + + //nolint:staticcheck // x509.IsEncryptedPEMBlock and x509.DecryptPEMBlock are deprecated. They are used here only to support a usecase. + if x509.IsEncryptedPEMBlock(block) { + b, err := x509.DecryptPEMBlock(block, []byte(password)) + if err != nil { + return nil, nil, fmt.Errorf("could not decrypt encrypted PEM block: %v", err) + } + block, _ = pem.Decode(b) + if block == nil { + return nil, nil, fmt.Errorf("encounter encrypted PEM block that did not decode") + } + } + + switch block.Type { + case "CERTIFICATE": + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, nil, fmt.Errorf("block labelled 'CERTIFICATE' could not be parsed by x509: %v", err) + } + certs = append(certs, cert) + case "PRIVATE KEY": + if priv != nil { + return nil, nil, errors.New("found multiple private key blocks") + } + + var err error + priv, err = x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + return nil, nil, fmt.Errorf("could not decode private key: %v", err) + } + case "RSA PRIVATE KEY": + if priv != nil { + return nil, nil, errors.New("found multiple private key blocks") + } + var err error + priv, err = x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return nil, nil, fmt.Errorf("could not decode private key: %v", err) + } + } + pemData = rest + } + + if len(certs) == 0 { + return nil, nil, fmt.Errorf("no certificates found") + } + + if priv == nil { + return nil, nil, fmt.Errorf("no private key found") + } + + return certs, priv, nil +} + +// AssertionRequestOptions has required information for client assertion claims +type AssertionRequestOptions = exported.AssertionRequestOptions + +// Credential represents the credential used in confidential client flows. +type Credential struct { + secret string + + cert *x509.Certificate + key crypto.PrivateKey + x5c []string + + assertionCallback func(context.Context, AssertionRequestOptions) (string, error) + + tokenProvider func(context.Context, TokenProviderParameters) (TokenProviderResult, error) +} + +// toInternal returns the accesstokens.Credential that is used internally. The current structure of the +// code requires that client.go, requests.go and confidential.go share a credential type without +// having import recursion. That requires the type used between is in a shared package. Therefore +// we have this. 
+func (c Credential) toInternal() (*accesstokens.Credential, error) { + if c.secret != "" { + return &accesstokens.Credential{Secret: c.secret}, nil + } + if c.cert != nil { + if c.key == nil { + return nil, errors.New("missing private key for certificate") + } + return &accesstokens.Credential{Cert: c.cert, Key: c.key, X5c: c.x5c}, nil + } + if c.key != nil { + return nil, errors.New("missing certificate for private key") + } + if c.assertionCallback != nil { + return &accesstokens.Credential{AssertionCallback: c.assertionCallback}, nil + } + if c.tokenProvider != nil { + return &accesstokens.Credential{TokenProvider: c.tokenProvider}, nil + } + return nil, errors.New("invalid credential") +} + +// NewCredFromSecret creates a Credential from a secret. +func NewCredFromSecret(secret string) (Credential, error) { + if secret == "" { + return Credential{}, errors.New("secret can't be empty string") + } + return Credential{secret: secret}, nil +} + +// NewCredFromAssertionCallback creates a Credential that invokes a callback to get assertions +// authenticating the application. The callback must be thread safe. +func NewCredFromAssertionCallback(callback func(context.Context, AssertionRequestOptions) (string, error)) Credential { + return Credential{assertionCallback: callback} +} + +// NewCredFromCert creates a Credential from a certificate or chain of certificates and an RSA private key +// as returned by [CertFromPEM]. +func NewCredFromCert(certs []*x509.Certificate, key crypto.PrivateKey) (Credential, error) { + cred := Credential{key: key} + k, ok := key.(*rsa.PrivateKey) + if !ok { + return cred, errors.New("key must be an RSA key") + } + for _, cert := range certs { + if cert == nil { + // not returning an error here because certs may still contain a sufficient cert/key pair + continue + } + certKey, ok := cert.PublicKey.(*rsa.PublicKey) + if ok && k.E == certKey.E && k.N.Cmp(certKey.N) == 0 { + // We know this is the signing cert because its public key matches the given private key. + // This cert must be first in x5c. + cred.cert = cert + cred.x5c = append([]string{base64.StdEncoding.EncodeToString(cert.Raw)}, cred.x5c...) + } else { + cred.x5c = append(cred.x5c, base64.StdEncoding.EncodeToString(cert.Raw)) + } + } + if cred.cert == nil { + return cred, errors.New("key doesn't match any certificate") + } + return cred, nil +} + +// TokenProviderParameters is the authentication parameters passed to token providers +type TokenProviderParameters = exported.TokenProviderParameters + +// TokenProviderResult is the authentication result returned by custom token providers +type TokenProviderResult = exported.TokenProviderResult + +// NewCredFromTokenProvider creates a Credential from a function that provides access tokens. The function +// must be concurrency safe. This is intended only to allow the Azure SDK to cache MSI tokens. It isn't +// useful to applications in general because the token provider must implement all authentication logic. +func NewCredFromTokenProvider(provider func(context.Context, TokenProviderParameters) (TokenProviderResult, error)) Credential { + return Credential{tokenProvider: provider} +} + +// AutoDetectRegion instructs MSAL Go to auto detect region for Azure regional token service. +func AutoDetectRegion() string { + return "TryAutoDetect" +} + +// Client is a representation of authentication client for confidential applications as defined in the +// package doc. A new Client should be created PER SERVICE USER. 
+// For more information, visit https://docs.microsoft.com/azure/active-directory/develop/msal-client-applications +type Client struct { + base base.Client + cred *accesstokens.Credential +} + +// clientOptions are optional settings for New(). These options are set using various functions +// returning Option calls. +type clientOptions struct { + accessor cache.ExportReplace + authority, azureRegion string + capabilities []string + disableInstanceDiscovery, sendX5C bool + httpClient ops.HTTPClient +} + +// Option is an optional argument to New(). +type Option func(o *clientOptions) + +// WithCache provides an accessor that will read and write authentication data to an externally managed cache. +func WithCache(accessor cache.ExportReplace) Option { + return func(o *clientOptions) { + o.accessor = accessor + } +} + +// WithClientCapabilities allows configuring one or more client capabilities such as "CP1" +func WithClientCapabilities(capabilities []string) Option { + return func(o *clientOptions) { + // there's no danger of sharing the slice's underlying memory with the application because + // this slice is simply passed to base.WithClientCapabilities, which copies its data + o.capabilities = capabilities + } +} + +// WithHTTPClient allows for a custom HTTP client to be set. +func WithHTTPClient(httpClient ops.HTTPClient) Option { + return func(o *clientOptions) { + o.httpClient = httpClient + } +} + +// WithX5C specifies if x5c claim(public key of the certificate) should be sent to STS to enable Subject Name Issuer Authentication. +func WithX5C() Option { + return func(o *clientOptions) { + o.sendX5C = true + } +} + +// WithInstanceDiscovery set to false to disable authority validation (to support private cloud scenarios) +func WithInstanceDiscovery(enabled bool) Option { + return func(o *clientOptions) { + o.disableInstanceDiscovery = !enabled + } +} + +// WithAzureRegion sets the region(preferred) or Confidential.AutoDetectRegion() for auto detecting region. +// Region names as per https://azure.microsoft.com/en-ca/global-infrastructure/geographies/. +// See https://aka.ms/region-map for more details on region names. +// The region value should be short region name for the region where the service is deployed. +// For example "centralus" is short name for region Central US. +// Not all auth flows can use the regional token service. +// Service To Service (client credential flow) tokens can be obtained from the regional service. +// Requires configuration at the tenant level. +// Auto-detection works on a limited number of Azure artifacts (VMs, Azure functions). +// If auto-detection fails, the non-regional endpoint will be used. +// If an invalid region name is provided, the non-regional endpoint MIGHT be used or the token request MIGHT fail. +func WithAzureRegion(val string) Option { + return func(o *clientOptions) { + o.azureRegion = val + } +} + +// New is the constructor for Client. authority is the URL of a token authority such as "https://login.microsoftonline.com/". +// If the Client will connect directly to AD FS, use "adfs" for the tenant. clientID is the application's client ID (also called its +// "application ID"). 
+func New(authority, clientID string, cred Credential, options ...Option) (Client, error) { + internalCred, err := cred.toInternal() + if err != nil { + return Client{}, err + } + + opts := clientOptions{ + authority: authority, + // if the caller specified a token provider, it will handle all details of authentication, using Client only as a token cache + disableInstanceDiscovery: cred.tokenProvider != nil, + httpClient: shared.DefaultClient, + } + for _, o := range options { + o(&opts) + } + baseOpts := []base.Option{ + base.WithCacheAccessor(opts.accessor), + base.WithClientCapabilities(opts.capabilities), + base.WithInstanceDiscovery(!opts.disableInstanceDiscovery), + base.WithRegionDetection(opts.azureRegion), + base.WithX5C(opts.sendX5C), + } + base, err := base.New(clientID, opts.authority, oauth.New(opts.httpClient), baseOpts...) + if err != nil { + return Client{}, err + } + base.AuthParams.IsConfidentialClient = true + + return Client{base: base, cred: internalCred}, nil +} + +// authCodeURLOptions contains options for AuthCodeURL +type authCodeURLOptions struct { + claims, loginHint, tenantID, domainHint string +} + +// AuthCodeURLOption is implemented by options for AuthCodeURL +type AuthCodeURLOption interface { + authCodeURLOption() +} + +// AuthCodeURL creates a URL used to acquire an authorization code. Users need to call CreateAuthorizationCodeURLParameters and pass it in. +// +// Options: [WithClaims], [WithDomainHint], [WithLoginHint], [WithTenantID] +func (cca Client) AuthCodeURL(ctx context.Context, clientID, redirectURI string, scopes []string, opts ...AuthCodeURLOption) (string, error) { + o := authCodeURLOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return "", err + } + ap, err := cca.base.AuthParams.WithTenant(o.tenantID) + if err != nil { + return "", err + } + ap.Claims = o.claims + ap.LoginHint = o.loginHint + ap.DomainHint = o.domainHint + return cca.base.AuthCodeURL(ctx, clientID, redirectURI, scopes, ap) +} + +// WithLoginHint pre-populates the login prompt with a username. +func WithLoginHint(username string) interface { + AuthCodeURLOption + options.CallOption +} { + return struct { + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *authCodeURLOptions: + t.loginHint = username + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// WithDomainHint adds the IdP domain as domain_hint query parameter in the auth url. +func WithDomainHint(domain string) interface { + AuthCodeURLOption + options.CallOption +} { + return struct { + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *authCodeURLOptions: + t.domainHint = domain + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// WithClaims sets additional claims to request for the token, such as those required by conditional access policies. +// Use this option when Azure AD returned a claims challenge for a prior request. The argument must be decoded. +// This option is valid for any token acquisition method. 
+func WithClaims(claims string) interface { + AcquireByAuthCodeOption + AcquireByCredentialOption + AcquireOnBehalfOfOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption +} { + return struct { + AcquireByAuthCodeOption + AcquireByCredentialOption + AcquireOnBehalfOfOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenByAuthCodeOptions: + t.claims = claims + case *acquireTokenByCredentialOptions: + t.claims = claims + case *acquireTokenOnBehalfOfOptions: + t.claims = claims + case *acquireTokenSilentOptions: + t.claims = claims + case *authCodeURLOptions: + t.claims = claims + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// WithTenantID specifies a tenant for a single authentication. It may be different than the tenant set in [New]. +// This option is valid for any token acquisition method. +func WithTenantID(tenantID string) interface { + AcquireByAuthCodeOption + AcquireByCredentialOption + AcquireOnBehalfOfOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption +} { + return struct { + AcquireByAuthCodeOption + AcquireByCredentialOption + AcquireOnBehalfOfOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenByAuthCodeOptions: + t.tenantID = tenantID + case *acquireTokenByCredentialOptions: + t.tenantID = tenantID + case *acquireTokenOnBehalfOfOptions: + t.tenantID = tenantID + case *acquireTokenSilentOptions: + t.tenantID = tenantID + case *authCodeURLOptions: + t.tenantID = tenantID + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// acquireTokenSilentOptions are all the optional settings to an AcquireTokenSilent() call. +// These are set by using various AcquireTokenSilentOption functions. +type acquireTokenSilentOptions struct { + account Account + claims, tenantID string +} + +// AcquireSilentOption is implemented by options for AcquireTokenSilent +type AcquireSilentOption interface { + acquireSilentOption() +} + +// WithSilentAccount uses the passed account during an AcquireTokenSilent() call. +func WithSilentAccount(account Account) interface { + AcquireSilentOption + options.CallOption +} { + return struct { + AcquireSilentOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenSilentOptions: + t.account = account + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// AcquireTokenSilent acquires a token from either the cache or using a refresh token. 
+// +// Options: [WithClaims], [WithSilentAccount], [WithTenantID] +func (cca Client) AcquireTokenSilent(ctx context.Context, scopes []string, opts ...AcquireSilentOption) (AuthResult, error) { + o := acquireTokenSilentOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return AuthResult{}, err + } + + if o.claims != "" { + return AuthResult{}, errors.New("call another AcquireToken method to request a new token having these claims") + } + + silentParameters := base.AcquireTokenSilentParameters{ + Scopes: scopes, + Account: o.account, + RequestType: accesstokens.ATConfidential, + Credential: cca.cred, + IsAppCache: o.account.IsZero(), + TenantID: o.tenantID, + } + + return cca.base.AcquireTokenSilent(ctx, silentParameters) +} + +// acquireTokenByAuthCodeOptions contains the optional parameters used to acquire an access token using the authorization code flow. +type acquireTokenByAuthCodeOptions struct { + challenge, claims, tenantID string +} + +// AcquireByAuthCodeOption is implemented by options for AcquireTokenByAuthCode +type AcquireByAuthCodeOption interface { + acquireByAuthCodeOption() +} + +// WithChallenge allows you to provide a challenge for the .AcquireTokenByAuthCode() call. +func WithChallenge(challenge string) interface { + AcquireByAuthCodeOption + options.CallOption +} { + return struct { + AcquireByAuthCodeOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenByAuthCodeOptions: + t.challenge = challenge + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// AcquireTokenByAuthCode is a request to acquire a security token from the authority, using an authorization code. +// The specified redirect URI must be the same URI that was used when the authorization code was requested. +// +// Options: [WithChallenge], [WithClaims], [WithTenantID] +func (cca Client) AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, opts ...AcquireByAuthCodeOption) (AuthResult, error) { + o := acquireTokenByAuthCodeOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return AuthResult{}, err + } + + params := base.AcquireTokenAuthCodeParameters{ + Scopes: scopes, + Code: code, + Challenge: o.challenge, + Claims: o.claims, + AppType: accesstokens.ATConfidential, + Credential: cca.cred, // This setting differs from public.Client.AcquireTokenByAuthCode + RedirectURI: redirectURI, + TenantID: o.tenantID, + } + + return cca.base.AcquireTokenByAuthCode(ctx, params) +} + +// acquireTokenByCredentialOptions contains optional configuration for AcquireTokenByCredential +type acquireTokenByCredentialOptions struct { + claims, tenantID string +} + +// AcquireByCredentialOption is implemented by options for AcquireTokenByCredential +type AcquireByCredentialOption interface { + acquireByCredOption() +} + +// AcquireTokenByCredential acquires a security token from the authority, using the client credentials grant. 
+// +// Options: [WithClaims], [WithTenantID] +func (cca Client) AcquireTokenByCredential(ctx context.Context, scopes []string, opts ...AcquireByCredentialOption) (AuthResult, error) { + o := acquireTokenByCredentialOptions{} + err := options.ApplyOptions(&o, opts) + if err != nil { + return AuthResult{}, err + } + authParams, err := cca.base.AuthParams.WithTenant(o.tenantID) + if err != nil { + return AuthResult{}, err + } + authParams.Scopes = scopes + authParams.AuthorizationType = authority.ATClientCredentials + authParams.Claims = o.claims + + token, err := cca.base.Token.Credential(ctx, authParams, cca.cred) + if err != nil { + return AuthResult{}, err + } + return cca.base.AuthResultFromToken(ctx, authParams, token, true) +} + +// acquireTokenOnBehalfOfOptions contains optional configuration for AcquireTokenOnBehalfOf +type acquireTokenOnBehalfOfOptions struct { + claims, tenantID string +} + +// AcquireOnBehalfOfOption is implemented by options for AcquireTokenOnBehalfOf +type AcquireOnBehalfOfOption interface { + acquireOBOOption() +} + +// AcquireTokenOnBehalfOf acquires a security token for an app using middle tier apps access token. +// Refer https://docs.microsoft.com/en-us/azure/active-directory/develop/v2-oauth2-on-behalf-of-flow. +// +// Options: [WithClaims], [WithTenantID] +func (cca Client) AcquireTokenOnBehalfOf(ctx context.Context, userAssertion string, scopes []string, opts ...AcquireOnBehalfOfOption) (AuthResult, error) { + o := acquireTokenOnBehalfOfOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return AuthResult{}, err + } + params := base.AcquireTokenOnBehalfOfParameters{ + Scopes: scopes, + UserAssertion: userAssertion, + Claims: o.claims, + Credential: cca.cred, + TenantID: o.tenantID, + } + return cca.base.AcquireTokenOnBehalfOf(ctx, params) +} + +// Account gets the account in the token cache with the specified homeAccountID. +func (cca Client) Account(ctx context.Context, accountID string) (Account, error) { + return cca.base.Account(ctx, accountID) +} + +// RemoveAccount signs the account out and forgets account from token cache. +func (cca Client) RemoveAccount(ctx context.Context, account Account) error { + return cca.base.RemoveAccount(ctx, account) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/error_design.md b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/error_design.md new file mode 100644 index 00000000..7ef7862f --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/error_design.md @@ -0,0 +1,111 @@ +# MSAL Error Design + +Author: Abhidnya Patil(abhidnya.patil@microsoft.com) + +Contributors: + +- John Doak(jdoak@microsoft.com) +- Keegan Caruso(Keegan.Caruso@microsoft.com) +- Joel Hendrix(jhendrix@microsoft.com) + +## Background + +Errors in MSAL are intended for app developers to troubleshoot and not for displaying to end-users. + +### Go error handling vs other MSAL languages + +Most modern languages use exception based errors. Simply put, you "throw" an exception and it must be caught at some routine in the upper stack or it will eventually crash the program. + +Go doesn't use exceptions, instead it relies on multiple return values, one of which can be the builtin error interface type. It is up to the user to decide what to do. + +### Go custom error types + +Errors can be created in Go by simply using errors.New() or fmt.Errorf() to create an "error". + +Custom errors can be created in multiple ways. 
One of the more robust ways is simply to satisfy the error interface: + +```go +type MyCustomErr struct { + Msg string +} +func (m MyCustomErr) Error() string { // This implements "error" + return m.Msg +} +``` + +### MSAL Error Goals + +- Provide diagnostics to the user and for tickets that can be used to track down bugs or client misconfigurations +- Detect errors that are transitory and can be retried +- Allow the user to identify certain errors that the program can respond to, such a informing the user for the need to do an enrollment + +## Implementing Client Side Errors + +Client side errors indicate a misconfiguration or passing of bad arguments that is non-recoverable. Retrying isn't possible. + +These errors can simply be standard Go errors created by errors.New() or fmt.Errorf(). If down the line we need a custom error, we can introduce it, but for now the error messages just need to be clear on what the issue was. + +## Implementing Service Side Errors + +Service side errors occur when an external RPC responds either with an HTTP error code or returns a message that includes an error. + +These errors can be transitory (please slow down) or permanent (HTTP 404). To provide our diagnostic goals, we require the ability to differentiate these errors from other errors. + +The current implementation includes a specialized type that captures any error from the server: + +```go +// CallErr represents an HTTP call error. Has a Verbose() method that allows getting the +// http.Request and Response objects. Implements error. +type CallErr struct { + Req *http.Request + Resp *http.Response + Err error +} + +// Errors implements error.Error(). +func (e CallErr) Error() string { + return e.Err.Error() +} + +// Verbose prints a versbose error message with the request or response. +func (e CallErr) Verbose() string { + e.Resp.Request = nil // This brings in a bunch of TLS stuff we don't need + e.Resp.TLS = nil // Same + return fmt.Sprintf("%s:\nRequest:\n%s\nResponse:\n%s", e.Err, prettyConf.Sprint(e.Req), prettyConf.Sprint(e.Resp)) +} +``` + +A user will always receive the most concise error we provide. They can tell if it is a server side error using Go error package: + +```go +var callErr CallErr +if errors.As(err, &callErr) { + ... +} +``` + +We provide a Verbose() function that can retrieve the most verbose message from any error we provide: + +```go +fmt.Println(errors.Verbose(err)) +``` + +If further differentiation is required, we can add custom errors that use Go error wrapping on top of CallErr to achieve our diagnostic goals (such as detecting when to retry a call due to transient errors). + +CallErr is always thrown from the comm package (which handles all http requests) and looks similar to: + +```go +return nil, errors.CallErr{ + Req: req, + Resp: reply, + Err: fmt.Errorf("http call(%s)(%s) error: reply status code was %d:\n%s", req.URL.String(), req.Method, reply.StatusCode, ErrorResponse), //ErrorResponse is the json body extracted from the http response + } +``` + +## Future Decisions + +The ability to retry calls needs to have centralized responsibility. Either the user is doing it or the client is doing it. + +If the user should be responsible, our errors package will include a CanRetry() function that will inform the user if the error provided to them is retryable. This is based on the http error code and possibly the type of error that was returned. It would also include a sleep time if the server returned an amount of time to wait. 
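To make the first option concrete, a hypothetical sketch of such a helper follows. `CanRetry` does not exist in the package today; the choice of retryable status codes and the Retry-After handling (assumed to be whole seconds) are illustrative assumptions layered on the `CallErr` type shown earlier, and the function is assumed to live in the same package as `CallErr`.

```go
package errors

import (
	"errors"
	"net/http"
	"strconv"
	"time"
)

// CanRetry is a hypothetical helper, not part of the package today. It inspects
// a CallErr (defined earlier in this document) and reports whether the failed
// call looks transient, along with any server-suggested wait from Retry-After.
func CanRetry(err error) (retry bool, wait time.Duration) {
	var callErr CallErr
	if !errors.As(err, &callErr) || callErr.Resp == nil {
		return false, 0
	}
	switch callErr.Resp.StatusCode {
	case http.StatusTooManyRequests, // 429
		http.StatusInternalServerError, // 500
		http.StatusBadGateway,          // 502
		http.StatusServiceUnavailable,  // 503
		http.StatusGatewayTimeout:      // 504
		// Honor Retry-After when present; assumed here to be whole seconds.
		if s := callErr.Resp.Header.Get("Retry-After"); s != "" {
			if secs, convErr := strconv.Atoi(s); convErr == nil {
				wait = time.Duration(secs) * time.Second
			}
		}
		return true, wait
	}
	return false, 0
}
```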
+ +Otherwise we will do this internally and retries will be left to us. diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go new file mode 100644 index 00000000..c9b8dbed --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go @@ -0,0 +1,89 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package errors + +import ( + "errors" + "fmt" + "io" + "net/http" + "reflect" + "strings" + + "github.com/kylelemons/godebug/pretty" +) + +var prettyConf = &pretty.Config{ + IncludeUnexported: false, + SkipZeroFields: true, + TrackCycles: true, + Formatter: map[reflect.Type]interface{}{ + reflect.TypeOf((*io.Reader)(nil)).Elem(): func(r io.Reader) string { + b, err := io.ReadAll(r) + if err != nil { + return "could not read io.Reader content" + } + return string(b) + }, + }, +} + +type verboser interface { + Verbose() string +} + +// Verbose prints the most verbose error that the error message has. +func Verbose(err error) string { + build := strings.Builder{} + for { + if err == nil { + break + } + if v, ok := err.(verboser); ok { + build.WriteString(v.Verbose()) + } else { + build.WriteString(err.Error()) + } + err = errors.Unwrap(err) + } + return build.String() +} + +// New is equivalent to errors.New(). +func New(text string) error { + return errors.New(text) +} + +// CallErr represents an HTTP call error. Has a Verbose() method that allows getting the +// http.Request and Response objects. Implements error. +type CallErr struct { + Req *http.Request + // Resp contains response body + Resp *http.Response + Err error +} + +// Errors implements error.Error(). +func (e CallErr) Error() string { + return e.Err.Error() +} + +// Verbose prints a versbose error message with the request or response. +func (e CallErr) Verbose() string { + e.Resp.Request = nil // This brings in a bunch of TLS crap we don't need + e.Resp.TLS = nil // Same + return fmt.Sprintf("%s:\nRequest:\n%s\nResponse:\n%s", e.Err, prettyConf.Sprint(e.Req), prettyConf.Sprint(e.Resp)) +} + +// Is reports whether any error in errors chain matches target. +func Is(err, target error) bool { + return errors.Is(err, target) +} + +// As finds the first error in errors chain that matches target, +// and if so, sets target to that error value and returns true. +// Otherwise, it returns false. +func As(err error, target interface{}) bool { + return errors.As(err, target) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go new file mode 100644 index 00000000..5f68384f --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go @@ -0,0 +1,467 @@ +// Package base contains a "Base" client that is used by the external public.Client and confidential.Client. +// Base holds shared attributes that must be available to both clients and methods that act as +// shared calls. 
+package base + +import ( + "context" + "errors" + "fmt" + "net/url" + "reflect" + "strings" + "sync" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" +) + +const ( + // AuthorityPublicCloud is the default AAD authority host + AuthorityPublicCloud = "https://login.microsoftonline.com/common" + scopeSeparator = " " +) + +// manager provides an internal cache. It is defined to allow faking the cache in tests. +// In production it's a *storage.Manager or *storage.PartitionedManager. +type manager interface { + cache.Serializer + Read(context.Context, authority.AuthParams) (storage.TokenResponse, error) + Write(authority.AuthParams, accesstokens.TokenResponse) (shared.Account, error) +} + +// accountManager is a manager that also caches accounts. In production it's a *storage.Manager. +type accountManager interface { + manager + AllAccounts() []shared.Account + Account(homeAccountID string) shared.Account + RemoveAccount(account shared.Account, clientID string) +} + +// AcquireTokenSilentParameters contains the parameters to acquire a token silently (from cache). +type AcquireTokenSilentParameters struct { + Scopes []string + Account shared.Account + RequestType accesstokens.AppType + Credential *accesstokens.Credential + IsAppCache bool + TenantID string + UserAssertion string + AuthorizationType authority.AuthorizeType + Claims string +} + +// AcquireTokenAuthCodeParameters contains the parameters required to acquire an access token using the auth code flow. +// To use PKCE, set the CodeChallengeParameter. +// Code challenges are used to secure authorization code grants; for more information, visit +// https://tools.ietf.org/html/rfc7636. +type AcquireTokenAuthCodeParameters struct { + Scopes []string + Code string + Challenge string + Claims string + RedirectURI string + AppType accesstokens.AppType + Credential *accesstokens.Credential + TenantID string +} + +type AcquireTokenOnBehalfOfParameters struct { + Scopes []string + Claims string + Credential *accesstokens.Credential + TenantID string + UserAssertion string +} + +// AuthResult contains the results of one token acquisition operation in PublicClientApplication +// or ConfidentialClientApplication. For details see https://aka.ms/msal-net-authenticationresult +type AuthResult struct { + Account shared.Account + IDToken accesstokens.IDToken + AccessToken string + ExpiresOn time.Time + GrantedScopes []string + DeclinedScopes []string +} + +// AuthResultFromStorage creates an AuthResult from a storage token response (which is generated from the cache). 
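+//
+// A minimal usage sketch (assuming a storage.TokenResponse already read from the cache,
+// as AcquireTokenSilent below does):
+//
+//	if ar, err := AuthResultFromStorage(storageTokenResponse); err == nil {
+//		return ar, nil // cache hit, no network call needed
+//	}
+//	// otherwise fall back to redeeming a cached refresh token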
+func AuthResultFromStorage(storageTokenResponse storage.TokenResponse) (AuthResult, error) { + if err := storageTokenResponse.AccessToken.Validate(); err != nil { + return AuthResult{}, fmt.Errorf("problem with access token in StorageTokenResponse: %w", err) + } + + account := storageTokenResponse.Account + accessToken := storageTokenResponse.AccessToken.Secret + grantedScopes := strings.Split(storageTokenResponse.AccessToken.Scopes, scopeSeparator) + + // Checking if there was an ID token in the cache; this will throw an error in the case of confidential client applications. + var idToken accesstokens.IDToken + if !storageTokenResponse.IDToken.IsZero() { + err := idToken.UnmarshalJSON([]byte(storageTokenResponse.IDToken.Secret)) + if err != nil { + return AuthResult{}, fmt.Errorf("problem decoding JWT token: %w", err) + } + } + return AuthResult{account, idToken, accessToken, storageTokenResponse.AccessToken.ExpiresOn.T, grantedScopes, nil}, nil +} + +// NewAuthResult creates an AuthResult. +func NewAuthResult(tokenResponse accesstokens.TokenResponse, account shared.Account) (AuthResult, error) { + if len(tokenResponse.DeclinedScopes) > 0 { + return AuthResult{}, fmt.Errorf("token response failed because declined scopes are present: %s", strings.Join(tokenResponse.DeclinedScopes, ",")) + } + return AuthResult{ + Account: account, + IDToken: tokenResponse.IDToken, + AccessToken: tokenResponse.AccessToken, + ExpiresOn: tokenResponse.ExpiresOn.T, + GrantedScopes: tokenResponse.GrantedScopes.Slice, + }, nil +} + +// Client is a base client that provides access to common methods and primatives that +// can be used by multiple clients. +type Client struct { + Token *oauth.Client + manager accountManager // *storage.Manager or fakeManager in tests + // pmanager is a partitioned cache for OBO authentication. *storage.PartitionedManager or fakeManager in tests + pmanager manager + + AuthParams authority.AuthParams // DO NOT EVER MAKE THIS A POINTER! See "Note" in New(). + cacheAccessor cache.ExportReplace + cacheAccessorMu *sync.RWMutex +} + +// Option is an optional argument to the New constructor. +type Option func(c *Client) error + +// WithCacheAccessor allows you to set some type of cache for storing authentication tokens. +func WithCacheAccessor(ca cache.ExportReplace) Option { + return func(c *Client) error { + if ca != nil { + c.cacheAccessor = ca + } + return nil + } +} + +// WithClientCapabilities allows configuring one or more client capabilities such as "CP1" +func WithClientCapabilities(capabilities []string) Option { + return func(c *Client) error { + var err error + if len(capabilities) > 0 { + cc, err := authority.NewClientCapabilities(capabilities) + if err == nil { + c.AuthParams.Capabilities = cc + } + } + return err + } +} + +// WithKnownAuthorityHosts specifies hosts Client shouldn't validate or request metadata for because they're known to the user +func WithKnownAuthorityHosts(hosts []string) Option { + return func(c *Client) error { + cp := make([]string, len(hosts)) + copy(cp, hosts) + c.AuthParams.KnownAuthorityHosts = cp + return nil + } +} + +// WithX5C specifies if x5c claim(public key of the certificate) should be sent to STS to enable Subject Name Issuer Authentication. 
+func WithX5C(sendX5C bool) Option { + return func(c *Client) error { + c.AuthParams.SendX5C = sendX5C + return nil + } +} + +func WithRegionDetection(region string) Option { + return func(c *Client) error { + c.AuthParams.AuthorityInfo.Region = region + return nil + } +} + +func WithInstanceDiscovery(instanceDiscoveryEnabled bool) Option { + return func(c *Client) error { + c.AuthParams.AuthorityInfo.ValidateAuthority = instanceDiscoveryEnabled + c.AuthParams.AuthorityInfo.InstanceDiscoveryDisabled = !instanceDiscoveryEnabled + return nil + } +} + +// New is the constructor for Base. +func New(clientID string, authorityURI string, token *oauth.Client, options ...Option) (Client, error) { + //By default, validateAuthority is set to true and instanceDiscoveryDisabled is set to false + authInfo, err := authority.NewInfoFromAuthorityURI(authorityURI, true, false) + if err != nil { + return Client{}, err + } + authParams := authority.NewAuthParams(clientID, authInfo) + client := Client{ // Note: Hey, don't even THINK about making Base into *Base. See "design notes" in public.go and confidential.go + Token: token, + AuthParams: authParams, + cacheAccessorMu: &sync.RWMutex{}, + manager: storage.New(token), + pmanager: storage.NewPartitionedManager(token), + } + for _, o := range options { + if err = o(&client); err != nil { + break + } + } + return client, err + +} + +// AuthCodeURL creates a URL used to acquire an authorization code. +func (b Client) AuthCodeURL(ctx context.Context, clientID, redirectURI string, scopes []string, authParams authority.AuthParams) (string, error) { + endpoints, err := b.Token.ResolveEndpoints(ctx, authParams.AuthorityInfo, "") + if err != nil { + return "", err + } + + baseURL, err := url.Parse(endpoints.AuthorizationEndpoint) + if err != nil { + return "", err + } + + claims, err := authParams.MergeCapabilitiesAndClaims() + if err != nil { + return "", err + } + + v := url.Values{} + v.Add("client_id", clientID) + v.Add("response_type", "code") + v.Add("redirect_uri", redirectURI) + v.Add("scope", strings.Join(scopes, scopeSeparator)) + if authParams.State != "" { + v.Add("state", authParams.State) + } + if claims != "" { + v.Add("claims", claims) + } + if authParams.CodeChallenge != "" { + v.Add("code_challenge", authParams.CodeChallenge) + } + if authParams.CodeChallengeMethod != "" { + v.Add("code_challenge_method", authParams.CodeChallengeMethod) + } + if authParams.LoginHint != "" { + v.Add("login_hint", authParams.LoginHint) + } + if authParams.Prompt != "" { + v.Add("prompt", authParams.Prompt) + } + if authParams.DomainHint != "" { + v.Add("domain_hint", authParams.DomainHint) + } + // There were left over from an implementation that didn't use any of these. We may + // need to add them later, but as of now aren't needed. 
+ /* + if p.ResponseMode != "" { + urlParams.Add("response_mode", p.ResponseMode) + } + */ + baseURL.RawQuery = v.Encode() + return baseURL.String(), nil +} + +func (b Client) AcquireTokenSilent(ctx context.Context, silent AcquireTokenSilentParameters) (AuthResult, error) { + ar := AuthResult{} + // when tenant == "", the caller didn't specify a tenant and WithTenant will choose the client's configured tenant + tenant := silent.TenantID + authParams, err := b.AuthParams.WithTenant(tenant) + if err != nil { + return ar, err + } + authParams.Scopes = silent.Scopes + authParams.HomeAccountID = silent.Account.HomeAccountID + authParams.AuthorizationType = silent.AuthorizationType + authParams.Claims = silent.Claims + authParams.UserAssertion = silent.UserAssertion + + m := b.pmanager + if authParams.AuthorizationType != authority.ATOnBehalfOf { + authParams.AuthorizationType = authority.ATRefreshToken + m = b.manager + } + if b.cacheAccessor != nil { + key := authParams.CacheKey(silent.IsAppCache) + b.cacheAccessorMu.RLock() + err = b.cacheAccessor.Replace(ctx, m, cache.ReplaceHints{PartitionKey: key}) + b.cacheAccessorMu.RUnlock() + } + if err != nil { + return ar, err + } + storageTokenResponse, err := m.Read(ctx, authParams) + if err != nil { + return ar, err + } + + // ignore cached access tokens when given claims + if silent.Claims == "" { + ar, err = AuthResultFromStorage(storageTokenResponse) + if err == nil { + return ar, err + } + } + + // redeem a cached refresh token, if available + if reflect.ValueOf(storageTokenResponse.RefreshToken).IsZero() { + return ar, errors.New("no token found") + } + var cc *accesstokens.Credential + if silent.RequestType == accesstokens.ATConfidential { + cc = silent.Credential + } + token, err := b.Token.Refresh(ctx, silent.RequestType, authParams, cc, storageTokenResponse.RefreshToken) + if err != nil { + return ar, err + } + return b.AuthResultFromToken(ctx, authParams, token, true) +} + +func (b Client) AcquireTokenByAuthCode(ctx context.Context, authCodeParams AcquireTokenAuthCodeParameters) (AuthResult, error) { + authParams, err := b.AuthParams.WithTenant(authCodeParams.TenantID) + if err != nil { + return AuthResult{}, err + } + authParams.Claims = authCodeParams.Claims + authParams.Scopes = authCodeParams.Scopes + authParams.Redirecturi = authCodeParams.RedirectURI + authParams.AuthorizationType = authority.ATAuthCode + + var cc *accesstokens.Credential + if authCodeParams.AppType == accesstokens.ATConfidential { + cc = authCodeParams.Credential + authParams.IsConfidentialClient = true + } + + req, err := accesstokens.NewCodeChallengeRequest(authParams, authCodeParams.AppType, cc, authCodeParams.Code, authCodeParams.Challenge) + if err != nil { + return AuthResult{}, err + } + + token, err := b.Token.AuthCode(ctx, req) + if err != nil { + return AuthResult{}, err + } + + return b.AuthResultFromToken(ctx, authParams, token, true) +} + +// AcquireTokenOnBehalfOf acquires a security token for an app using middle tier apps access token. 
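+//
+// It first tries to satisfy the request from the token cache (via AcquireTokenSilent,
+// matching on the user assertion) and only redeems the assertion against the token
+// endpoint on a cache miss.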
+func (b Client) AcquireTokenOnBehalfOf(ctx context.Context, onBehalfOfParams AcquireTokenOnBehalfOfParameters) (AuthResult, error) { + var ar AuthResult + silentParameters := AcquireTokenSilentParameters{ + Scopes: onBehalfOfParams.Scopes, + RequestType: accesstokens.ATConfidential, + Credential: onBehalfOfParams.Credential, + UserAssertion: onBehalfOfParams.UserAssertion, + AuthorizationType: authority.ATOnBehalfOf, + TenantID: onBehalfOfParams.TenantID, + Claims: onBehalfOfParams.Claims, + } + ar, err := b.AcquireTokenSilent(ctx, silentParameters) + if err == nil { + return ar, err + } + authParams, err := b.AuthParams.WithTenant(onBehalfOfParams.TenantID) + if err != nil { + return AuthResult{}, err + } + authParams.AuthorizationType = authority.ATOnBehalfOf + authParams.Claims = onBehalfOfParams.Claims + authParams.Scopes = onBehalfOfParams.Scopes + authParams.UserAssertion = onBehalfOfParams.UserAssertion + token, err := b.Token.OnBehalfOf(ctx, authParams, onBehalfOfParams.Credential) + if err == nil { + ar, err = b.AuthResultFromToken(ctx, authParams, token, true) + } + return ar, err +} + +func (b Client) AuthResultFromToken(ctx context.Context, authParams authority.AuthParams, token accesstokens.TokenResponse, cacheWrite bool) (AuthResult, error) { + if !cacheWrite { + return NewAuthResult(token, shared.Account{}) + } + var m manager = b.manager + if authParams.AuthorizationType == authority.ATOnBehalfOf { + m = b.pmanager + } + key := token.CacheKey(authParams) + if b.cacheAccessor != nil { + b.cacheAccessorMu.Lock() + defer b.cacheAccessorMu.Unlock() + err := b.cacheAccessor.Replace(ctx, m, cache.ReplaceHints{PartitionKey: key}) + if err != nil { + return AuthResult{}, err + } + } + account, err := m.Write(authParams, token) + if err != nil { + return AuthResult{}, err + } + ar, err := NewAuthResult(token, account) + if err == nil && b.cacheAccessor != nil { + err = b.cacheAccessor.Export(ctx, b.manager, cache.ExportHints{PartitionKey: key}) + } + return ar, err +} + +func (b Client) AllAccounts(ctx context.Context) ([]shared.Account, error) { + if b.cacheAccessor != nil { + b.cacheAccessorMu.RLock() + defer b.cacheAccessorMu.RUnlock() + key := b.AuthParams.CacheKey(false) + err := b.cacheAccessor.Replace(ctx, b.manager, cache.ReplaceHints{PartitionKey: key}) + if err != nil { + return nil, err + } + } + return b.manager.AllAccounts(), nil +} + +func (b Client) Account(ctx context.Context, homeAccountID string) (shared.Account, error) { + if b.cacheAccessor != nil { + b.cacheAccessorMu.RLock() + defer b.cacheAccessorMu.RUnlock() + authParams := b.AuthParams // This is a copy, as we don't have a pointer receiver and .AuthParams is not a pointer. + authParams.AuthorizationType = authority.AccountByID + authParams.HomeAccountID = homeAccountID + key := b.AuthParams.CacheKey(false) + err := b.cacheAccessor.Replace(ctx, b.manager, cache.ReplaceHints{PartitionKey: key}) + if err != nil { + return shared.Account{}, err + } + } + return b.manager.Account(homeAccountID), nil +} + +// RemoveAccount removes all the ATs, RTs and IDTs from the cache associated with this account. 
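+//
+// When an external cache accessor is configured, the external cache is replaced into
+// the in-memory manager first, the account is removed, and the updated cache is
+// exported back so both stay in sync.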
+func (b Client) RemoveAccount(ctx context.Context, account shared.Account) error {
+	if b.cacheAccessor == nil {
+		b.manager.RemoveAccount(account, b.AuthParams.ClientID)
+		return nil
+	}
+	b.cacheAccessorMu.Lock()
+	defer b.cacheAccessorMu.Unlock()
+	key := b.AuthParams.CacheKey(false)
+	err := b.cacheAccessor.Replace(ctx, b.manager, cache.ReplaceHints{PartitionKey: key})
+	if err != nil {
+		return err
+	}
+	b.manager.RemoveAccount(account, b.AuthParams.ClientID)
+	return b.cacheAccessor.Export(ctx, b.manager, cache.ExportHints{PartitionKey: key})
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go
new file mode 100644
index 00000000..548c2fae
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go
@@ -0,0 +1,200 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+package storage
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"strings"
+	"time"
+
+	internalTime "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared"
+)
+
+// Contract is the JSON structure that is written to any storage medium when serializing
+// the internal cache. This design is shared between MSAL versions in many languages.
+// This cannot be changed without design that includes other SDKs.
+type Contract struct {
+	AccessTokens  map[string]AccessToken               `json:"AccessToken,omitempty"`
+	RefreshTokens map[string]accesstokens.RefreshToken `json:"RefreshToken,omitempty"`
+	IDTokens      map[string]IDToken                   `json:"IdToken,omitempty"`
+	Accounts      map[string]shared.Account            `json:"Account,omitempty"`
+	AppMetaData   map[string]AppMetaData               `json:"AppMetadata,omitempty"`
+
+	AdditionalFields map[string]interface{}
+}
+
+// InMemoryContract is the in-memory structure that holds the same cache entries as Contract,
+// but partitioned by a key such as the user assertion hash or the home account ID.
+// It is used by PartitionedManager.
+type InMemoryContract struct {
+	AccessTokensPartition  map[string]map[string]AccessToken
+	RefreshTokensPartition map[string]map[string]accesstokens.RefreshToken
+	IDTokensPartition      map[string]map[string]IDToken
+	AccountsPartition      map[string]map[string]shared.Account
+	AppMetaData            map[string]AppMetaData
+}
+
+// NewInMemoryContract is the constructor for InMemoryContract.
+func NewInMemoryContract() *InMemoryContract {
+	return &InMemoryContract{
+		AccessTokensPartition:  map[string]map[string]AccessToken{},
+		RefreshTokensPartition: map[string]map[string]accesstokens.RefreshToken{},
+		IDTokensPartition:      map[string]map[string]IDToken{},
+		AccountsPartition:      map[string]map[string]shared.Account{},
+		AppMetaData:            map[string]AppMetaData{},
+	}
+}
+
+// NewContract is the constructor for Contract.
+func NewContract() *Contract { + return &Contract{ + AccessTokens: map[string]AccessToken{}, + RefreshTokens: map[string]accesstokens.RefreshToken{}, + IDTokens: map[string]IDToken{}, + Accounts: map[string]shared.Account{}, + AppMetaData: map[string]AppMetaData{}, + AdditionalFields: map[string]interface{}{}, + } +} + +// AccessToken is the JSON representation of a MSAL access token for encoding to storage. +type AccessToken struct { + HomeAccountID string `json:"home_account_id,omitempty"` + Environment string `json:"environment,omitempty"` + Realm string `json:"realm,omitempty"` + CredentialType string `json:"credential_type,omitempty"` + ClientID string `json:"client_id,omitempty"` + Secret string `json:"secret,omitempty"` + Scopes string `json:"target,omitempty"` + ExpiresOn internalTime.Unix `json:"expires_on,omitempty"` + ExtendedExpiresOn internalTime.Unix `json:"extended_expires_on,omitempty"` + CachedAt internalTime.Unix `json:"cached_at,omitempty"` + UserAssertionHash string `json:"user_assertion_hash,omitempty"` + + AdditionalFields map[string]interface{} +} + +// NewAccessToken is the constructor for AccessToken. +func NewAccessToken(homeID, env, realm, clientID string, cachedAt, expiresOn, extendedExpiresOn time.Time, scopes, token string) AccessToken { + return AccessToken{ + HomeAccountID: homeID, + Environment: env, + Realm: realm, + CredentialType: "AccessToken", + ClientID: clientID, + Secret: token, + Scopes: scopes, + CachedAt: internalTime.Unix{T: cachedAt.UTC()}, + ExpiresOn: internalTime.Unix{T: expiresOn.UTC()}, + ExtendedExpiresOn: internalTime.Unix{T: extendedExpiresOn.UTC()}, + } +} + +// Key outputs the key that can be used to uniquely look up this entry in a map. +func (a AccessToken) Key() string { + return strings.Join( + []string{a.HomeAccountID, a.Environment, a.CredentialType, a.ClientID, a.Realm, a.Scopes}, + shared.CacheKeySeparator, + ) +} + +// FakeValidate enables tests to fake access token validation +var FakeValidate func(AccessToken) error + +// Validate validates that this AccessToken can be used. +func (a AccessToken) Validate() error { + if FakeValidate != nil { + return FakeValidate(a) + } + if a.CachedAt.T.After(time.Now()) { + return errors.New("access token isn't valid, it was cached at a future time") + } + if a.ExpiresOn.T.Before(time.Now().Add(5 * time.Minute)) { + return fmt.Errorf("access token is expired") + } + if a.CachedAt.T.IsZero() { + return fmt.Errorf("access token does not have CachedAt set") + } + return nil +} + +// IDToken is the JSON representation of an MSAL id token for encoding to storage. +type IDToken struct { + HomeAccountID string `json:"home_account_id,omitempty"` + Environment string `json:"environment,omitempty"` + Realm string `json:"realm,omitempty"` + CredentialType string `json:"credential_type,omitempty"` + ClientID string `json:"client_id,omitempty"` + Secret string `json:"secret,omitempty"` + UserAssertionHash string `json:"user_assertion_hash,omitempty"` + AdditionalFields map[string]interface{} +} + +// IsZero determines if IDToken is the zero value. +func (i IDToken) IsZero() bool { + v := reflect.ValueOf(i) + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + if !field.IsZero() { + switch field.Kind() { + case reflect.Map, reflect.Slice: + if field.Len() == 0 { + continue + } + } + return false + } + } + return true +} + +// NewIDToken is the constructor for IDToken. 
+func NewIDToken(homeID, env, realm, clientID, idToken string) IDToken { + return IDToken{ + HomeAccountID: homeID, + Environment: env, + Realm: realm, + CredentialType: "IDToken", + ClientID: clientID, + Secret: idToken, + } +} + +// Key outputs the key that can be used to uniquely look up this entry in a map. +func (id IDToken) Key() string { + return strings.Join( + []string{id.HomeAccountID, id.Environment, id.CredentialType, id.ClientID, id.Realm}, + shared.CacheKeySeparator, + ) +} + +// AppMetaData is the JSON representation of application metadata for encoding to storage. +type AppMetaData struct { + FamilyID string `json:"family_id,omitempty"` + ClientID string `json:"client_id,omitempty"` + Environment string `json:"environment,omitempty"` + + AdditionalFields map[string]interface{} +} + +// NewAppMetaData is the constructor for AppMetaData. +func NewAppMetaData(familyID, clientID, environment string) AppMetaData { + return AppMetaData{ + FamilyID: familyID, + ClientID: clientID, + Environment: environment, + } +} + +// Key outputs the key that can be used to uniquely look up this entry in a map. +func (a AppMetaData) Key() string { + return strings.Join( + []string{"AppMetaData", a.Environment, a.ClientID}, + shared.CacheKeySeparator, + ) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go new file mode 100644 index 00000000..87d7d797 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go @@ -0,0 +1,436 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package storage + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" +) + +// PartitionedManager is a partitioned in-memory cache of access tokens, accounts and meta data. +type PartitionedManager struct { + contract *InMemoryContract + contractMu sync.RWMutex + requests aadInstanceDiscoveryer // *oauth.Token + + aadCacheMu sync.RWMutex + aadCache map[string]authority.InstanceDiscoveryMetadata +} + +// NewPartitionedManager is the constructor for PartitionedManager. +func NewPartitionedManager(requests *oauth.Client) *PartitionedManager { + m := &PartitionedManager{requests: requests, aadCache: make(map[string]authority.InstanceDiscoveryMetadata)} + m.contract = NewInMemoryContract() + return m +} + +// Read reads a storage token from the cache if it exists. 
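+//
+// Lookups go against per-partition maps keyed by the request's user assertion hash or
+// the token's home account ID. Errors from the individual read helpers are treated as
+// cache misses: they are not returned, the corresponding TokenResponse field is simply
+// left empty.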
+func (m *PartitionedManager) Read(ctx context.Context, authParameters authority.AuthParams) (TokenResponse, error) { + tr := TokenResponse{} + realm := authParameters.AuthorityInfo.Tenant + clientID := authParameters.ClientID + scopes := authParameters.Scopes + + // fetch metadata if instanceDiscovery is enabled + aliases := []string{authParameters.AuthorityInfo.Host} + if !authParameters.AuthorityInfo.InstanceDiscoveryDisabled { + metadata, err := m.getMetadataEntry(ctx, authParameters.AuthorityInfo) + if err != nil { + return TokenResponse{}, err + } + aliases = metadata.Aliases + } + + userAssertionHash := authParameters.AssertionHash() + partitionKeyFromRequest := userAssertionHash + + // errors returned by read* methods indicate a cache miss and are therefore non-fatal. We continue populating + // TokenResponse fields so that e.g. lack of an ID token doesn't prevent the caller from receiving a refresh token. + accessToken, err := m.readAccessToken(aliases, realm, clientID, userAssertionHash, scopes, partitionKeyFromRequest) + if err == nil { + tr.AccessToken = accessToken + } + idToken, err := m.readIDToken(aliases, realm, clientID, userAssertionHash, getPartitionKeyIDTokenRead(accessToken)) + if err == nil { + tr.IDToken = idToken + } + + if appMetadata, err := m.readAppMetaData(aliases, clientID); err == nil { + // we need the family ID to identify the correct refresh token, if any + familyID := appMetadata.FamilyID + refreshToken, err := m.readRefreshToken(aliases, familyID, clientID, userAssertionHash, partitionKeyFromRequest) + if err == nil { + tr.RefreshToken = refreshToken + } + } + + account, err := m.readAccount(aliases, realm, userAssertionHash, idToken.HomeAccountID) + if err == nil { + tr.Account = account + } + return tr, nil +} + +// Write writes a token response to the cache and returns the account information the token is stored with. +func (m *PartitionedManager) Write(authParameters authority.AuthParams, tokenResponse accesstokens.TokenResponse) (shared.Account, error) { + authParameters.HomeAccountID = tokenResponse.ClientInfo.HomeAccountID() + homeAccountID := authParameters.HomeAccountID + environment := authParameters.AuthorityInfo.Host + realm := authParameters.AuthorityInfo.Tenant + clientID := authParameters.ClientID + target := strings.Join(tokenResponse.GrantedScopes.Slice, scopeSeparator) + userAssertionHash := authParameters.AssertionHash() + cachedAt := time.Now() + + var account shared.Account + + if len(tokenResponse.RefreshToken) > 0 { + refreshToken := accesstokens.NewRefreshToken(homeAccountID, environment, clientID, tokenResponse.RefreshToken, tokenResponse.FamilyID) + if authParameters.AuthorizationType == authority.ATOnBehalfOf { + refreshToken.UserAssertionHash = userAssertionHash + } + if err := m.writeRefreshToken(refreshToken, getPartitionKeyRefreshToken(refreshToken)); err != nil { + return account, err + } + } + + if len(tokenResponse.AccessToken) > 0 { + accessToken := NewAccessToken( + homeAccountID, + environment, + realm, + clientID, + cachedAt, + tokenResponse.ExpiresOn.T, + tokenResponse.ExtExpiresOn.T, + target, + tokenResponse.AccessToken, + ) + if authParameters.AuthorizationType == authority.ATOnBehalfOf { + accessToken.UserAssertionHash = userAssertionHash // get Hash method on this + } + + // Since we have a valid access token, cache it before moving on. 
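+		// Validate rejects tokens with a missing or future CachedAt and tokens expiring
+		// within the next 5 minutes; such a failure aborts the write below.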
+ if err := accessToken.Validate(); err == nil { + if err := m.writeAccessToken(accessToken, getPartitionKeyAccessToken(accessToken)); err != nil { + return account, err + } + } else { + return shared.Account{}, err + } + } + + idTokenJwt := tokenResponse.IDToken + if !idTokenJwt.IsZero() { + idToken := NewIDToken(homeAccountID, environment, realm, clientID, idTokenJwt.RawToken) + if authParameters.AuthorizationType == authority.ATOnBehalfOf { + idToken.UserAssertionHash = userAssertionHash + } + if err := m.writeIDToken(idToken, getPartitionKeyIDToken(idToken)); err != nil { + return shared.Account{}, err + } + + localAccountID := idTokenJwt.LocalAccountID() + authorityType := authParameters.AuthorityInfo.AuthorityType + + preferredUsername := idTokenJwt.UPN + if idTokenJwt.PreferredUsername != "" { + preferredUsername = idTokenJwt.PreferredUsername + } + + account = shared.NewAccount( + homeAccountID, + environment, + realm, + localAccountID, + authorityType, + preferredUsername, + ) + if authParameters.AuthorizationType == authority.ATOnBehalfOf { + account.UserAssertionHash = userAssertionHash + } + if err := m.writeAccount(account, getPartitionKeyAccount(account)); err != nil { + return shared.Account{}, err + } + } + + AppMetaData := NewAppMetaData(tokenResponse.FamilyID, clientID, environment) + + if err := m.writeAppMetaData(AppMetaData); err != nil { + return shared.Account{}, err + } + return account, nil +} + +func (m *PartitionedManager) getMetadataEntry(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + md, err := m.aadMetadataFromCache(ctx, authorityInfo) + if err != nil { + // not in the cache, retrieve it + md, err = m.aadMetadata(ctx, authorityInfo) + } + return md, err +} + +func (m *PartitionedManager) aadMetadataFromCache(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + m.aadCacheMu.RLock() + defer m.aadCacheMu.RUnlock() + metadata, ok := m.aadCache[authorityInfo.Host] + if ok { + return metadata, nil + } + return metadata, errors.New("not found") +} + +func (m *PartitionedManager) aadMetadata(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + discoveryResponse, err := m.requests.AADInstanceDiscovery(ctx, authorityInfo) + if err != nil { + return authority.InstanceDiscoveryMetadata{}, err + } + + m.aadCacheMu.Lock() + defer m.aadCacheMu.Unlock() + + for _, metadataEntry := range discoveryResponse.Metadata { + for _, aliasedAuthority := range metadataEntry.Aliases { + m.aadCache[aliasedAuthority] = metadataEntry + } + } + if _, ok := m.aadCache[authorityInfo.Host]; !ok { + m.aadCache[authorityInfo.Host] = authority.InstanceDiscoveryMetadata{ + PreferredNetwork: authorityInfo.Host, + PreferredCache: authorityInfo.Host, + } + } + return m.aadCache[authorityInfo.Host], nil +} + +func (m *PartitionedManager) readAccessToken(envAliases []string, realm, clientID, userAssertionHash string, scopes []string, partitionKey string) (AccessToken, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + if accessTokens, ok := m.contract.AccessTokensPartition[partitionKey]; ok { + // TODO: linear search (over a map no less) is slow for a large number (thousands) of tokens. + // this shows up as the dominating node in a profile. for real-world scenarios this likely isn't + // an issue, however if it does become a problem then we know where to look. 
+ for _, at := range accessTokens { + if at.Realm == realm && at.ClientID == clientID && at.UserAssertionHash == userAssertionHash { + if checkAlias(at.Environment, envAliases) { + if isMatchingScopes(scopes, at.Scopes) { + return at, nil + } + } + } + } + } + return AccessToken{}, fmt.Errorf("access token not found") +} + +func (m *PartitionedManager) writeAccessToken(accessToken AccessToken, partitionKey string) error { + m.contractMu.Lock() + defer m.contractMu.Unlock() + key := accessToken.Key() + if m.contract.AccessTokensPartition[partitionKey] == nil { + m.contract.AccessTokensPartition[partitionKey] = make(map[string]AccessToken) + } + m.contract.AccessTokensPartition[partitionKey][key] = accessToken + return nil +} + +func matchFamilyRefreshTokenObo(rt accesstokens.RefreshToken, userAssertionHash string, envAliases []string) bool { + return rt.UserAssertionHash == userAssertionHash && checkAlias(rt.Environment, envAliases) && rt.FamilyID != "" +} + +func matchClientIDRefreshTokenObo(rt accesstokens.RefreshToken, userAssertionHash string, envAliases []string, clientID string) bool { + return rt.UserAssertionHash == userAssertionHash && checkAlias(rt.Environment, envAliases) && rt.ClientID == clientID +} + +func (m *PartitionedManager) readRefreshToken(envAliases []string, familyID, clientID, userAssertionHash, partitionKey string) (accesstokens.RefreshToken, error) { + byFamily := func(rt accesstokens.RefreshToken) bool { + return matchFamilyRefreshTokenObo(rt, userAssertionHash, envAliases) + } + byClient := func(rt accesstokens.RefreshToken) bool { + return matchClientIDRefreshTokenObo(rt, userAssertionHash, envAliases, clientID) + } + + var matchers []func(rt accesstokens.RefreshToken) bool + if familyID == "" { + matchers = []func(rt accesstokens.RefreshToken) bool{ + byClient, byFamily, + } + } else { + matchers = []func(rt accesstokens.RefreshToken) bool{ + byFamily, byClient, + } + } + + // TODO(keegan): All the tests here pass, but Bogdan says this is + // more complicated. I'm opening an issue for this to have him + // review the tests and suggest tests that would break this so + // we can re-write against good tests. His comments as follow: + // The algorithm is a bit more complex than this, I assume there are some tests covering everything. I would keep the order as is. + // The algorithm is: + // If application is NOT part of the family, search by client_ID + // If app is part of the family or if we DO NOT KNOW if it's part of the family, search by family ID, then by client_id (we will know if an app is part of the family after the first token response). 
+	// https://github.com/AzureAD/microsoft-authentication-library-for-dotnet/blob/311fe8b16e7c293462806f397e189a6aa1159769/src/client/Microsoft.Identity.Client/Internal/Requests/Silent/CacheSilentStrategy.cs#L95
+	m.contractMu.RLock()
+	defer m.contractMu.RUnlock()
+	for _, matcher := range matchers {
+		for _, rt := range m.contract.RefreshTokensPartition[partitionKey] {
+			if matcher(rt) {
+				return rt, nil
+			}
+		}
+	}
+
+	return accesstokens.RefreshToken{}, fmt.Errorf("refresh token not found")
+}
+
+func (m *PartitionedManager) writeRefreshToken(refreshToken accesstokens.RefreshToken, partitionKey string) error {
+	m.contractMu.Lock()
+	defer m.contractMu.Unlock()
+	key := refreshToken.Key()
+	if m.contract.RefreshTokensPartition[partitionKey] == nil {
+		m.contract.RefreshTokensPartition[partitionKey] = make(map[string]accesstokens.RefreshToken)
+	}
+	m.contract.RefreshTokensPartition[partitionKey][key] = refreshToken
+	return nil
+}
+
+func (m *PartitionedManager) readIDToken(envAliases []string, realm, clientID, userAssertionHash, partitionKey string) (IDToken, error) {
+	m.contractMu.RLock()
+	defer m.contractMu.RUnlock()
+	for _, idt := range m.contract.IDTokensPartition[partitionKey] {
+		if idt.Realm == realm && idt.ClientID == clientID && idt.UserAssertionHash == userAssertionHash {
+			if checkAlias(idt.Environment, envAliases) {
+				return idt, nil
+			}
+		}
+	}
+	return IDToken{}, fmt.Errorf("token not found")
+}
+
+func (m *PartitionedManager) writeIDToken(idToken IDToken, partitionKey string) error {
+	key := idToken.Key()
+	m.contractMu.Lock()
+	defer m.contractMu.Unlock()
+	if m.contract.IDTokensPartition[partitionKey] == nil {
+		m.contract.IDTokensPartition[partitionKey] = make(map[string]IDToken)
+	}
+	m.contract.IDTokensPartition[partitionKey][key] = idToken
+	return nil
+}
+
+func (m *PartitionedManager) readAccount(envAliases []string, realm, UserAssertionHash, partitionKey string) (shared.Account, error) {
+	m.contractMu.RLock()
+	defer m.contractMu.RUnlock()
+
+	// You might ask why, if cache.Accounts is a map, we would loop through all of these instead of using a key.
+	// We only use a map because the storage contract shared between all language implementations says use a map.
+	// We can't change that. The other reason is that the keys are made using a specific "env", but here we are allowing
+	// a match in multiple envs (envAlias). That means we either need to hash each possible key and do the lookup
+	// or just statically check. Since the design is to have a storage.Manager per user, the amount of keys stored
+	// is really low (say 2). Each hash is more expensive than the entire iteration.
+ for _, acc := range m.contract.AccountsPartition[partitionKey] { + if checkAlias(acc.Environment, envAliases) && acc.UserAssertionHash == UserAssertionHash && acc.Realm == realm { + return acc, nil + } + } + return shared.Account{}, fmt.Errorf("account not found") +} + +func (m *PartitionedManager) writeAccount(account shared.Account, partitionKey string) error { + key := account.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + if m.contract.AccountsPartition[partitionKey] == nil { + m.contract.AccountsPartition[partitionKey] = make(map[string]shared.Account) + } + m.contract.AccountsPartition[partitionKey][key] = account + return nil +} + +func (m *PartitionedManager) readAppMetaData(envAliases []string, clientID string) (AppMetaData, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + + for _, app := range m.contract.AppMetaData { + if checkAlias(app.Environment, envAliases) && app.ClientID == clientID { + return app, nil + } + } + return AppMetaData{}, fmt.Errorf("not found") +} + +func (m *PartitionedManager) writeAppMetaData(AppMetaData AppMetaData) error { + key := AppMetaData.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract.AppMetaData[key] = AppMetaData + return nil +} + +// update updates the internal cache object. This is for use in tests, other uses are not +// supported. +func (m *PartitionedManager) update(cache *InMemoryContract) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract = cache +} + +// Marshal implements cache.Marshaler. +func (m *PartitionedManager) Marshal() ([]byte, error) { + return json.Marshal(m.contract) +} + +// Unmarshal implements cache.Unmarshaler. +func (m *PartitionedManager) Unmarshal(b []byte) error { + m.contractMu.Lock() + defer m.contractMu.Unlock() + + contract := NewInMemoryContract() + + err := json.Unmarshal(b, contract) + if err != nil { + return err + } + + m.contract = contract + + return nil +} + +func getPartitionKeyAccessToken(item AccessToken) string { + if item.UserAssertionHash != "" { + return item.UserAssertionHash + } + return item.HomeAccountID +} + +func getPartitionKeyRefreshToken(item accesstokens.RefreshToken) string { + if item.UserAssertionHash != "" { + return item.UserAssertionHash + } + return item.HomeAccountID +} + +func getPartitionKeyIDToken(item IDToken) string { + return item.HomeAccountID +} + +func getPartitionKeyAccount(item shared.Account) string { + return item.HomeAccountID +} + +func getPartitionKeyIDTokenRead(item AccessToken) string { + return item.HomeAccountID +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go new file mode 100644 index 00000000..add75192 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go @@ -0,0 +1,517 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package storage holds all cached token information for MSAL. This storage can be +// augmented with third-party extensions to provide persistent storage. In that case, +// reads and writes in upper packages will call Marshal() to take the entire in-memory +// representation and write it to storage and Unmarshal() to update the entire in-memory +// storage with what was in the persistent storage. 
The persistent storage can only be +// accessed in this way because multiple MSAL clients written in multiple languages can +// access the same storage and must adhere to the same method that was defined +// previously. +package storage + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" +) + +// aadInstanceDiscoveryer allows faking in tests. +// It is implemented in production by ops/authority.Client +type aadInstanceDiscoveryer interface { + AADInstanceDiscovery(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryResponse, error) +} + +// TokenResponse mimics a token response that was pulled from the cache. +type TokenResponse struct { + RefreshToken accesstokens.RefreshToken + IDToken IDToken // *Credential + AccessToken AccessToken + Account shared.Account +} + +// Manager is an in-memory cache of access tokens, accounts and meta data. This data is +// updated on read/write calls. Unmarshal() replaces all data stored here with whatever +// was given to it on each call. +type Manager struct { + contract *Contract + contractMu sync.RWMutex + requests aadInstanceDiscoveryer // *oauth.Token + + aadCacheMu sync.RWMutex + aadCache map[string]authority.InstanceDiscoveryMetadata +} + +// New is the constructor for Manager. +func New(requests *oauth.Client) *Manager { + m := &Manager{requests: requests, aadCache: make(map[string]authority.InstanceDiscoveryMetadata)} + m.contract = NewContract() + return m +} + +func checkAlias(alias string, aliases []string) bool { + for _, v := range aliases { + if alias == v { + return true + } + } + return false +} + +func isMatchingScopes(scopesOne []string, scopesTwo string) bool { + newScopesTwo := strings.Split(scopesTwo, scopeSeparator) + scopeCounter := 0 + for _, scope := range scopesOne { + for _, otherScope := range newScopesTwo { + if strings.EqualFold(scope, otherScope) { + scopeCounter++ + continue + } + } + } + return scopeCounter == len(scopesOne) +} + +// Read reads a storage token from the cache if it exists. +func (m *Manager) Read(ctx context.Context, authParameters authority.AuthParams) (TokenResponse, error) { + tr := TokenResponse{} + homeAccountID := authParameters.HomeAccountID + realm := authParameters.AuthorityInfo.Tenant + clientID := authParameters.ClientID + scopes := authParameters.Scopes + + // fetch metadata if instanceDiscovery is enabled + aliases := []string{authParameters.AuthorityInfo.Host} + if !authParameters.AuthorityInfo.InstanceDiscoveryDisabled { + metadata, err := m.getMetadataEntry(ctx, authParameters.AuthorityInfo) + if err != nil { + return TokenResponse{}, err + } + aliases = metadata.Aliases + } + + accessToken := m.readAccessToken(homeAccountID, aliases, realm, clientID, scopes) + tr.AccessToken = accessToken + + if homeAccountID == "" { + // caller didn't specify a user, so there's no reason to search for an ID or refresh token + return tr, nil + } + // errors returned by read* methods indicate a cache miss and are therefore non-fatal. We continue populating + // TokenResponse fields so that e.g. 
lack of an ID token doesn't prevent the caller from receiving a refresh token. + idToken, err := m.readIDToken(homeAccountID, aliases, realm, clientID) + if err == nil { + tr.IDToken = idToken + } + + if appMetadata, err := m.readAppMetaData(aliases, clientID); err == nil { + // we need the family ID to identify the correct refresh token, if any + familyID := appMetadata.FamilyID + refreshToken, err := m.readRefreshToken(homeAccountID, aliases, familyID, clientID) + if err == nil { + tr.RefreshToken = refreshToken + } + } + + account, err := m.readAccount(homeAccountID, aliases, realm) + if err == nil { + tr.Account = account + } + return tr, nil +} + +const scopeSeparator = " " + +// Write writes a token response to the cache and returns the account information the token is stored with. +func (m *Manager) Write(authParameters authority.AuthParams, tokenResponse accesstokens.TokenResponse) (shared.Account, error) { + authParameters.HomeAccountID = tokenResponse.ClientInfo.HomeAccountID() + homeAccountID := authParameters.HomeAccountID + environment := authParameters.AuthorityInfo.Host + realm := authParameters.AuthorityInfo.Tenant + clientID := authParameters.ClientID + target := strings.Join(tokenResponse.GrantedScopes.Slice, scopeSeparator) + cachedAt := time.Now() + + var account shared.Account + + if len(tokenResponse.RefreshToken) > 0 { + refreshToken := accesstokens.NewRefreshToken(homeAccountID, environment, clientID, tokenResponse.RefreshToken, tokenResponse.FamilyID) + if err := m.writeRefreshToken(refreshToken); err != nil { + return account, err + } + } + + if len(tokenResponse.AccessToken) > 0 { + accessToken := NewAccessToken( + homeAccountID, + environment, + realm, + clientID, + cachedAt, + tokenResponse.ExpiresOn.T, + tokenResponse.ExtExpiresOn.T, + target, + tokenResponse.AccessToken, + ) + + // Since we have a valid access token, cache it before moving on. 
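+		// Note: a validation failure here (missing or future CachedAt, or a token expiring
+		// within 5 minutes) just skips caching the access token; unlike the partitioned
+		// manager it does not fail the write.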
+ if err := accessToken.Validate(); err == nil { + if err := m.writeAccessToken(accessToken); err != nil { + return account, err + } + } + } + + idTokenJwt := tokenResponse.IDToken + if !idTokenJwt.IsZero() { + idToken := NewIDToken(homeAccountID, environment, realm, clientID, idTokenJwt.RawToken) + if err := m.writeIDToken(idToken); err != nil { + return shared.Account{}, err + } + + localAccountID := idTokenJwt.LocalAccountID() + authorityType := authParameters.AuthorityInfo.AuthorityType + + preferredUsername := idTokenJwt.UPN + if idTokenJwt.PreferredUsername != "" { + preferredUsername = idTokenJwt.PreferredUsername + } + + account = shared.NewAccount( + homeAccountID, + environment, + realm, + localAccountID, + authorityType, + preferredUsername, + ) + if err := m.writeAccount(account); err != nil { + return shared.Account{}, err + } + } + + AppMetaData := NewAppMetaData(tokenResponse.FamilyID, clientID, environment) + + if err := m.writeAppMetaData(AppMetaData); err != nil { + return shared.Account{}, err + } + return account, nil +} + +func (m *Manager) getMetadataEntry(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + md, err := m.aadMetadataFromCache(ctx, authorityInfo) + if err != nil { + // not in the cache, retrieve it + md, err = m.aadMetadata(ctx, authorityInfo) + } + return md, err +} + +func (m *Manager) aadMetadataFromCache(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + m.aadCacheMu.RLock() + defer m.aadCacheMu.RUnlock() + metadata, ok := m.aadCache[authorityInfo.Host] + if ok { + return metadata, nil + } + return metadata, errors.New("not found") +} + +func (m *Manager) aadMetadata(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + m.aadCacheMu.Lock() + defer m.aadCacheMu.Unlock() + discoveryResponse, err := m.requests.AADInstanceDiscovery(ctx, authorityInfo) + if err != nil { + return authority.InstanceDiscoveryMetadata{}, err + } + + for _, metadataEntry := range discoveryResponse.Metadata { + for _, aliasedAuthority := range metadataEntry.Aliases { + m.aadCache[aliasedAuthority] = metadataEntry + } + } + if _, ok := m.aadCache[authorityInfo.Host]; !ok { + m.aadCache[authorityInfo.Host] = authority.InstanceDiscoveryMetadata{ + PreferredNetwork: authorityInfo.Host, + PreferredCache: authorityInfo.Host, + } + } + return m.aadCache[authorityInfo.Host], nil +} + +func (m *Manager) readAccessToken(homeID string, envAliases []string, realm, clientID string, scopes []string) AccessToken { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + // TODO: linear search (over a map no less) is slow for a large number (thousands) of tokens. + // this shows up as the dominating node in a profile. for real-world scenarios this likely isn't + // an issue, however if it does become a problem then we know where to look. 
+ for _, at := range m.contract.AccessTokens { + if at.HomeAccountID == homeID && at.Realm == realm && at.ClientID == clientID { + if checkAlias(at.Environment, envAliases) { + if isMatchingScopes(scopes, at.Scopes) { + return at + } + } + } + } + return AccessToken{} +} + +func (m *Manager) writeAccessToken(accessToken AccessToken) error { + m.contractMu.Lock() + defer m.contractMu.Unlock() + key := accessToken.Key() + m.contract.AccessTokens[key] = accessToken + return nil +} + +func (m *Manager) readRefreshToken(homeID string, envAliases []string, familyID, clientID string) (accesstokens.RefreshToken, error) { + byFamily := func(rt accesstokens.RefreshToken) bool { + return matchFamilyRefreshToken(rt, homeID, envAliases) + } + byClient := func(rt accesstokens.RefreshToken) bool { + return matchClientIDRefreshToken(rt, homeID, envAliases, clientID) + } + + var matchers []func(rt accesstokens.RefreshToken) bool + if familyID == "" { + matchers = []func(rt accesstokens.RefreshToken) bool{ + byClient, byFamily, + } + } else { + matchers = []func(rt accesstokens.RefreshToken) bool{ + byFamily, byClient, + } + } + + // TODO(keegan): All the tests here pass, but Bogdan says this is + // more complicated. I'm opening an issue for this to have him + // review the tests and suggest tests that would break this so + // we can re-write against good tests. His comments as follow: + // The algorithm is a bit more complex than this, I assume there are some tests covering everything. I would keep the order as is. + // The algorithm is: + // If application is NOT part of the family, search by client_ID + // If app is part of the family or if we DO NOT KNOW if it's part of the family, search by family ID, then by client_id (we will know if an app is part of the family after the first token response). 
+ // https://github.com/AzureAD/microsoft-authentication-library-for-dotnet/blob/311fe8b16e7c293462806f397e189a6aa1159769/src/client/Microsoft.Identity.Client/Internal/Requests/Silent/CacheSilentStrategy.cs#L95 + m.contractMu.RLock() + defer m.contractMu.RUnlock() + for _, matcher := range matchers { + for _, rt := range m.contract.RefreshTokens { + if matcher(rt) { + return rt, nil + } + } + } + + return accesstokens.RefreshToken{}, fmt.Errorf("refresh token not found") +} + +func matchFamilyRefreshToken(rt accesstokens.RefreshToken, homeID string, envAliases []string) bool { + return rt.HomeAccountID == homeID && checkAlias(rt.Environment, envAliases) && rt.FamilyID != "" +} + +func matchClientIDRefreshToken(rt accesstokens.RefreshToken, homeID string, envAliases []string, clientID string) bool { + return rt.HomeAccountID == homeID && checkAlias(rt.Environment, envAliases) && rt.ClientID == clientID +} + +func (m *Manager) writeRefreshToken(refreshToken accesstokens.RefreshToken) error { + key := refreshToken.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract.RefreshTokens[key] = refreshToken + return nil +} + +func (m *Manager) readIDToken(homeID string, envAliases []string, realm, clientID string) (IDToken, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + for _, idt := range m.contract.IDTokens { + if idt.HomeAccountID == homeID && idt.Realm == realm && idt.ClientID == clientID { + if checkAlias(idt.Environment, envAliases) { + return idt, nil + } + } + } + return IDToken{}, fmt.Errorf("token not found") +} + +func (m *Manager) writeIDToken(idToken IDToken) error { + key := idToken.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract.IDTokens[key] = idToken + return nil +} + +func (m *Manager) AllAccounts() []shared.Account { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + + var accounts []shared.Account + for _, v := range m.contract.Accounts { + accounts = append(accounts, v) + } + + return accounts +} + +func (m *Manager) Account(homeAccountID string) shared.Account { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + + for _, v := range m.contract.Accounts { + if v.HomeAccountID == homeAccountID { + return v + } + } + + return shared.Account{} +} + +func (m *Manager) readAccount(homeAccountID string, envAliases []string, realm string) (shared.Account, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + + // You might ask why, if cache.Accounts is a map, we would loop through all of these instead of using a key. + // We only use a map because the storage contract shared between all language implementations says use a map. + // We can't change that. The other is because the keys are made using a specific "env", but here we are allowing + // a match in multiple envs (envAlias). That means we either need to hash each possible keyand do the lookup + // or just statically check. Since the design is to have a storage.Manager per user, the amount of keys stored + // is really low (say 2). Each hash is more expensive than the entire iteration. 
+ for _, acc := range m.contract.Accounts { + if acc.HomeAccountID == homeAccountID && checkAlias(acc.Environment, envAliases) && acc.Realm == realm { + return acc, nil + } + } + return shared.Account{}, fmt.Errorf("account not found") +} + +func (m *Manager) writeAccount(account shared.Account) error { + key := account.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract.Accounts[key] = account + return nil +} + +func (m *Manager) readAppMetaData(envAliases []string, clientID string) (AppMetaData, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + + for _, app := range m.contract.AppMetaData { + if checkAlias(app.Environment, envAliases) && app.ClientID == clientID { + return app, nil + } + } + return AppMetaData{}, fmt.Errorf("not found") +} + +func (m *Manager) writeAppMetaData(AppMetaData AppMetaData) error { + key := AppMetaData.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract.AppMetaData[key] = AppMetaData + return nil +} + +// RemoveAccount removes all the associated ATs, RTs and IDTs from the cache associated with this account. +func (m *Manager) RemoveAccount(account shared.Account, clientID string) { + m.removeRefreshTokens(account.HomeAccountID, account.Environment, clientID) + m.removeAccessTokens(account.HomeAccountID, account.Environment) + m.removeIDTokens(account.HomeAccountID, account.Environment) + m.removeAccounts(account.HomeAccountID, account.Environment) +} + +func (m *Manager) removeRefreshTokens(homeID string, env string, clientID string) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + for key, rt := range m.contract.RefreshTokens { + // Check for RTs associated with the account. + if rt.HomeAccountID == homeID && rt.Environment == env { + // Do RT's app ownership check as a precaution, in case family apps + // and 3rd-party apps share same token cache, although they should not. + if rt.ClientID == clientID || rt.FamilyID != "" { + delete(m.contract.RefreshTokens, key) + } + } + } +} + +func (m *Manager) removeAccessTokens(homeID string, env string) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + for key, at := range m.contract.AccessTokens { + // Remove AT's associated with the account + if at.HomeAccountID == homeID && at.Environment == env { + // # To avoid the complexity of locating sibling family app's AT, we skip AT's app ownership check. + // It means ATs for other apps will also be removed, it is OK because: + // non-family apps are not supposed to share token cache to begin with; + // Even if it happens, we keep other app's RT already, so SSO still works. + delete(m.contract.AccessTokens, key) + } + } +} + +func (m *Manager) removeIDTokens(homeID string, env string) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + for key, idt := range m.contract.IDTokens { + // Remove ID tokens associated with the account. + if idt.HomeAccountID == homeID && idt.Environment == env { + delete(m.contract.IDTokens, key) + } + } +} + +func (m *Manager) removeAccounts(homeID string, env string) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + for key, acc := range m.contract.Accounts { + // Remove the specified account. + if acc.HomeAccountID == homeID && acc.Environment == env { + delete(m.contract.Accounts, key) + } + } +} + +// update updates the internal cache object. This is for use in tests, other uses are not +// supported. +func (m *Manager) update(cache *Contract) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract = cache +} + +// Marshal implements cache.Marshaler. 
+func (m *Manager) Marshal() ([]byte, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + return json.Marshal(m.contract) +} + +// Unmarshal implements cache.Unmarshaler. +func (m *Manager) Unmarshal(b []byte) error { + m.contractMu.Lock() + defer m.contractMu.Unlock() + + contract := NewContract() + + err := json.Unmarshal(b, contract) + if err != nil { + return err + } + + m.contract = contract + + return nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/test_serialized_cache.json b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/test_serialized_cache.json new file mode 100644 index 00000000..1d818192 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/test_serialized_cache.json @@ -0,0 +1,56 @@ +{ + "Account": { + "uid.utid-login.windows.net-contoso": { + "username": "John Doe", + "local_account_id": "object1234", + "realm": "contoso", + "environment": "login.windows.net", + "home_account_id": "uid.utid", + "authority_type": "MSSTS" + } + }, + "RefreshToken": { + "uid.utid-login.windows.net-refreshtoken-my_client_id--s2 s1 s3": { + "target": "s2 s1 s3", + "environment": "login.windows.net", + "credential_type": "RefreshToken", + "secret": "a refresh token", + "client_id": "my_client_id", + "home_account_id": "uid.utid" + } + }, + "AccessToken": { + "an-entry": { + "foo": "bar" + }, + "uid.utid-login.windows.net-accesstoken-my_client_id-contoso-s2 s1 s3": { + "environment": "login.windows.net", + "credential_type": "AccessToken", + "secret": "an access token", + "realm": "contoso", + "target": "s2 s1 s3", + "client_id": "my_client_id", + "cached_at": "1000", + "home_account_id": "uid.utid", + "extended_expires_on": "4600", + "expires_on": "4600" + } + }, + "IdToken": { + "uid.utid-login.windows.net-idtoken-my_client_id-contoso-": { + "realm": "contoso", + "environment": "login.windows.net", + "credential_type": "IdToken", + "secret": "header.eyJvaWQiOiAib2JqZWN0MTIzNCIsICJwcmVmZXJyZWRfdXNlcm5hbWUiOiAiSm9obiBEb2UiLCAic3ViIjogInN1YiJ9.signature", + "client_id": "my_client_id", + "home_account_id": "uid.utid" + } + }, + "unknownEntity": {"field1":"1","field2":"whats"}, + "AppMetadata": { + "AppMetadata-login.windows.net-my_client_id": { + "environment": "login.windows.net", + "client_id": "my_client_id" + } + } + } \ No newline at end of file diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go new file mode 100644 index 00000000..7b673e3f --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go @@ -0,0 +1,34 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// package exported contains internal types that are re-exported from a public package +package exported + +// AssertionRequestOptions has information required to generate a client assertion +type AssertionRequestOptions struct { + // ClientID identifies the application for which an assertion is requested. Used as the assertion's "iss" and "sub" claims. + ClientID string + + // TokenEndpoint is the intended token endpoint. Used as the assertion's "aud" claim. 
+ TokenEndpoint string +} + +// TokenProviderParameters is the authentication parameters passed to token providers +type TokenProviderParameters struct { + // Claims contains any additional claims requested for the token + Claims string + // CorrelationID of the authentication request + CorrelationID string + // Scopes requested for the token + Scopes []string + // TenantID identifies the tenant in which to authenticate + TenantID string +} + +// TokenProviderResult is the authentication result returned by custom token providers +type TokenProviderResult struct { + // AccessToken is the requested token + AccessToken string + // ExpiresInSeconds is the lifetime of the token in seconds + ExpiresInSeconds int +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/design.md b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/design.md new file mode 100644 index 00000000..09edb01b --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/design.md @@ -0,0 +1,140 @@ +# JSON Package Design +Author: John Doak(jdoak@microsoft.com) + +## Why? + +This project needs a special type of marshal/unmarshal not directly supported +by the encoding/json package. + +The need revolves around a few key wants/needs: +- unmarshal and marshal structs representing JSON messages +- fields in the messgage not in the struct must be maintained when unmarshalled +- those same fields must be marshalled back when encoded again + +The initial version used map[string]interface{} to put in the keys that +were known and then any other keys were put into a field called AdditionalFields. + +This has a few negatives: +- Dual marshaling/unmarshalling is required +- Adding a struct field requires manually adding a key by name to be encoded/decoded from the map (which is a loosely coupled construct), which can lead to bugs that aren't detected or have bad side effects +- Tests can become quickly disconnected if those keys aren't put +in tests as well. So you think you have support working, but you +don't. Existing tests were found that didn't test the marshalling output. +- There is no enforcement that if AdditionalFields is required on one struct, it should be on all containers +that don't have custom marshal/unmarshal. + +This package aims to support our needs by providing custom Marshal()/Unmarshal() functions. + +This prevents all the negatives in the initial solution listed above. However, it does add its own negative: +- Custom encoding/decoding via reflection is messy (as can be seen in encoding/json itself) + +Go proverb: Reflection is never clear +Suggested reading: https://blog.golang.org/laws-of-reflection + +## Important design decisions + +- We don't want to understand all JSON decoding rules +- We don't want to deal with all the quoting, commas, etc on decode +- Need support for json.Marshaler/Unmarshaler, so we can support types like time.Time +- If struct does not implement json.Unmarshaler, it must have AdditionalFields defined +- We only support root level objects that are \*struct or struct + +To faciliate these goals, we will utilize the json.Encoder and json.Decoder. +They provide streaming processing (efficient) and return errors on bad JSON. + +Support for json.Marshaler/Unmarshaler allows for us to use non-basic types +that must be specially encoded/decoded (like time.Time objects). 
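+
+To make that concrete, here is a small sketch of the intended round trip (the
+type and field names below are illustrative only, not part of the package):
+
+```go
+type message struct {
+	Name string `json:"name"`
+
+	// AdditionalFields catches any JSON keys that have no matching struct field.
+	AdditionalFields map[string]interface{}
+}
+```
+
+Unmarshalling `{"name": "x", "extra": 1}` into a message should leave Name == "x"
+and AdditionalFields["extra"] holding the raw value as a json.RawMessage, and
+marshalling that message back out should emit the "extra" key again alongside "name".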
+
+We don't support types that can't custom-unmarshal or have AdditionalFields,
+in order to prevent future devs from forgetting that important field and
+generating bad return values.
+
+Support for root level objects of \*struct or struct simply acknowledges the
+fact that this is designed only for the purposes listed in the introduction.
+Anything outside that (like encoding a lone number) should be done with the
+regular json package (as it will not have additional fields).
+
+We don't support a few things on JSON-supported reference types and structs:
+- \*map: no need for pointers to maps
+- \*slice: no need for pointers to slices
+- any further pointers on struct after \*struct
+
+There should never be a need for this in Go.
+
+## Design
+
+## State Machines
+
+This uses state machine designs based upon the Rob Pike talk on
+lexers and parsers: https://www.youtube.com/watch?v=HxaD_trXwRE
+
+This is the most common pattern for state machines in Go and
+the model to follow closely when dealing with streaming
+processing of textual data.
+
+Our state machines are based on the type:
+```go
+type stateFn func() (stateFn, error)
+```
+
+The state machine itself is simply a struct that has methods that
+satisfy stateFn.
+
+Our state machines have a few standard calls:
+- run(): runs the state machine
+- start(): always the first stateFn to be called
+
+All state machines have the following logic:
+* run() is called
+* start() is called and returns the next stateFn or an error
+* stateFn is called
+  - If the returned stateFn (next state) is non-nil, call it
+  - If the error is non-nil, run() returns the error
+  - If stateFn == nil and err == nil, run() returns nil
+
+## Supporting types
+
+Marshalling/Unmarshalling must support (within the top level struct):
+- struct
+- \*struct
+- []struct
+- []\*struct
+- []map[string]structContainer
+- [][]structContainer
+
+**Term note:** structContainer == a type that has a struct or \*struct inside it
+
+We specifically do not support []interface or map[string]interface
+where the interface value would hold some value with a struct in it.
+
+Those will still marshal/unmarshal, but without support for
+AdditionalFields.
+
+## Marshalling
+
+The marshalling design is based around a state machine.
+
+The basic logic is as follows:
+
+* If the struct has a custom marshaller, call it and return
+* If the struct has a field "AdditionalFields", it must be a map[string]interface{}
+* If the struct does not have "AdditionalFields", give an error
+* Get the struct tags detailing JSON names to Go names, create a mapping
+* For each public field name
+  - Write the field name out
+  - If the field value is a struct, recursively call our state machine
+  - Otherwise, use the json.Encoder to write out the value
+
+## Unmarshalling
+
+The unmarshalling design is likewise based around a state machine.
The +basic logic is as follows: + +* If struct has custom marhaller, call it +* If struct has field "AdditionalFields", it must be a map[string]interface{} +* Get struct tag detailing json names to go names, create mapping +* For each key found + - If key exists, + - If value is basic type, extract value into struct field using Decoder + - If value is struct type, recursively call statemachine + - If key doesn't exist, add it to AdditionalFields if it exists using Decoder diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go new file mode 100644 index 00000000..2238521f --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go @@ -0,0 +1,184 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package json provide functions for marshalling an unmarshalling types to JSON. These functions are meant to +// be utilized inside of structs that implement json.Unmarshaler and json.Marshaler interfaces. +// This package provides the additional functionality of writing fields that are not in the struct when marshalling +// to a field called AdditionalFields if that field exists and is a map[string]interface{}. +// When marshalling, if the struct has all the same prerequisites, it will uses the keys in AdditionalFields as +// extra fields. This package uses encoding/json underneath. +package json + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "strings" +) + +const addField = "AdditionalFields" +const ( + marshalJSON = "MarshalJSON" + unmarshalJSON = "UnmarshalJSON" +) + +var ( + leftBrace = []byte("{")[0] + rightBrace = []byte("}")[0] + comma = []byte(",")[0] + leftParen = []byte("[")[0] + rightParen = []byte("]")[0] +) + +var mapStrInterType = reflect.TypeOf(map[string]interface{}{}) + +// stateFn defines a state machine function. This will be used in all state +// machines in this package. +type stateFn func() (stateFn, error) + +// Marshal is used to marshal a type into its JSON representation. It +// wraps the stdlib calls in order to marshal a struct or *struct so +// that a field called "AdditionalFields" of type map[string]interface{} +// with "-" used inside struct tag `json:"-"` can be marshalled as if +// they were fields within the struct. +func Marshal(i interface{}) ([]byte, error) { + buff := bytes.Buffer{} + enc := json.NewEncoder(&buff) + enc.SetEscapeHTML(false) + enc.SetIndent("", "") + + v := reflect.ValueOf(i) + if v.Kind() != reflect.Ptr && v.CanAddr() { + v = v.Addr() + } + err := marshalStruct(v, &buff, enc) + if err != nil { + return nil, err + } + return buff.Bytes(), nil +} + +// Unmarshal unmarshals a []byte representing JSON into i, which must be a *struct. In addition, if the struct has +// a field called AdditionalFields of type map[string]interface{}, JSON data representing fields not in the struct +// will be written as key/value pairs to AdditionalFields. +func Unmarshal(b []byte, i interface{}) error { + if len(b) == 0 { + return nil + } + + jdec := json.NewDecoder(bytes.NewBuffer(b)) + jdec.UseNumber() + return unmarshalStruct(jdec, i) +} + +// MarshalRaw marshals i into a json.RawMessage. If I cannot be marshalled, +// this will panic. This is exposed to help test AdditionalField values +// which are stored as json.RawMessage. 
+func MarshalRaw(i interface{}) json.RawMessage { + b, err := json.Marshal(i) + if err != nil { + panic(err) + } + return json.RawMessage(b) +} + +// isDelim simply tests to see if a json.Token is a delimeter. +func isDelim(got json.Token) bool { + switch got.(type) { + case json.Delim: + return true + } + return false +} + +// delimIs tests got to see if it is want. +func delimIs(got json.Token, want rune) bool { + switch v := got.(type) { + case json.Delim: + if v == json.Delim(want) { + return true + } + } + return false +} + +// hasMarshalJSON will determine if the value or a pointer to this value has +// the MarshalJSON method. +func hasMarshalJSON(v reflect.Value) bool { + if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid { + _, ok := v.Interface().(json.Marshaler) + return ok + } + + if v.Kind() == reflect.Ptr { + v = v.Elem() + } else { + if !v.CanAddr() { + return false + } + v = v.Addr() + } + + if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid { + _, ok := v.Interface().(json.Marshaler) + return ok + } + return false +} + +// callMarshalJSON will call MarshalJSON() method on the value or a pointer to this value. +// This will panic if the method is not defined. +func callMarshalJSON(v reflect.Value) ([]byte, error) { + if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid { + marsh := v.Interface().(json.Marshaler) + return marsh.MarshalJSON() + } + + if v.Kind() == reflect.Ptr { + v = v.Elem() + } else { + if v.CanAddr() { + v = v.Addr() + } + } + + if method := v.MethodByName(unmarshalJSON); method.Kind() != reflect.Invalid { + marsh := v.Interface().(json.Marshaler) + return marsh.MarshalJSON() + } + + panic(fmt.Sprintf("callMarshalJSON called on type %T that does not have MarshalJSON defined", v.Interface())) +} + +// hasUnmarshalJSON will determine if the value or a pointer to this value has +// the UnmarshalJSON method. +func hasUnmarshalJSON(v reflect.Value) bool { + // You can't unmarshal on a non-pointer type. + if v.Kind() != reflect.Ptr { + if !v.CanAddr() { + return false + } + v = v.Addr() + } + + if method := v.MethodByName(unmarshalJSON); method.Kind() != reflect.Invalid { + _, ok := v.Interface().(json.Unmarshaler) + return ok + } + + return false +} + +// hasOmitEmpty indicates if the field has instructed us to not output +// the field if omitempty is set on the tag. tag is the string +// returned by reflect.StructField.Tag().Get(). +func hasOmitEmpty(tag string) bool { + sl := strings.Split(tag, ",") + for _, str := range sl { + if str == "omitempty" { + return true + } + } + return false +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/mapslice.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/mapslice.go new file mode 100644 index 00000000..cef442f2 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/mapslice.go @@ -0,0 +1,333 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package json + +import ( + "encoding/json" + "fmt" + "reflect" +) + +// unmarshalMap unmarshal's a map. 
+func unmarshalMap(dec *json.Decoder, m reflect.Value) error { + if m.Kind() != reflect.Ptr || m.Elem().Kind() != reflect.Map { + panic("unmarshalMap called on non-*map value") + } + mapValueType := m.Elem().Type().Elem() + walk := mapWalk{dec: dec, m: m, valueType: mapValueType} + if err := walk.run(); err != nil { + return err + } + return nil +} + +type mapWalk struct { + dec *json.Decoder + key string + m reflect.Value + valueType reflect.Type +} + +// run runs our decoder state machine. +func (m *mapWalk) run() error { + var state = m.start + var err error + for { + state, err = state() + if err != nil { + return err + } + if state == nil { + return nil + } + } +} + +func (m *mapWalk) start() (stateFn, error) { + // maps can have custom unmarshaler's. + if hasUnmarshalJSON(m.m) { + err := m.dec.Decode(m.m.Interface()) + if err != nil { + return nil, err + } + return nil, nil + } + + // We only want to use this if the map value is: + // *struct/struct/map/slice + // otherwise use standard decode + t, _ := m.valueBaseType() + switch t.Kind() { + case reflect.Struct, reflect.Map, reflect.Slice: + delim, err := m.dec.Token() + if err != nil { + return nil, err + } + // This indicates the value was set to JSON null. + if delim == nil { + return nil, nil + } + if !delimIs(delim, '{') { + return nil, fmt.Errorf("Unmarshal expected opening {, received %v", delim) + } + return m.next, nil + case reflect.Ptr: + return nil, fmt.Errorf("do not support maps with values of '**type' or '*reference") + } + + // This is a basic map type, so just use Decode(). + if err := m.dec.Decode(m.m.Interface()); err != nil { + return nil, err + } + + return nil, nil +} + +func (m *mapWalk) next() (stateFn, error) { + if m.dec.More() { + key, err := m.dec.Token() + if err != nil { + return nil, err + } + m.key = key.(string) + return m.storeValue, nil + } + // No more entries, so remove final }. + _, err := m.dec.Token() + if err != nil { + return nil, err + } + return nil, nil +} + +func (m *mapWalk) storeValue() (stateFn, error) { + v := m.valueType + for { + switch v.Kind() { + case reflect.Ptr: + v = v.Elem() + continue + case reflect.Struct: + return m.storeStruct, nil + case reflect.Map: + return m.storeMap, nil + case reflect.Slice: + return m.storeSlice, nil + } + return nil, fmt.Errorf("bug: mapWalk.storeValue() called on unsupported type: %v", v.Kind()) + } +} + +func (m *mapWalk) storeStruct() (stateFn, error) { + v := newValue(m.valueType) + if err := unmarshalStruct(m.dec, v.Interface()); err != nil { + return nil, err + } + + if m.valueType.Kind() == reflect.Ptr { + m.m.Elem().SetMapIndex(reflect.ValueOf(m.key), v) + return m.next, nil + } + m.m.Elem().SetMapIndex(reflect.ValueOf(m.key), v.Elem()) + + return m.next, nil +} + +func (m *mapWalk) storeMap() (stateFn, error) { + v := reflect.MakeMap(m.valueType) + ptr := newValue(v.Type()) + ptr.Elem().Set(v) + if err := unmarshalMap(m.dec, ptr); err != nil { + return nil, err + } + + m.m.Elem().SetMapIndex(reflect.ValueOf(m.key), v) + + return m.next, nil +} + +func (m *mapWalk) storeSlice() (stateFn, error) { + v := newValue(m.valueType) + if err := unmarshalSlice(m.dec, v); err != nil { + return nil, err + } + + m.m.Elem().SetMapIndex(reflect.ValueOf(m.key), v.Elem()) + + return m.next, nil +} + +// valueType returns the underlying Type. So a *struct would yield +// struct, etc... 
+func (m *mapWalk) valueBaseType() (reflect.Type, bool) { + ptr := false + v := m.valueType + if v.Kind() == reflect.Ptr { + ptr = true + v = v.Elem() + } + return v, ptr +} + +// unmarshalSlice unmarshal's the next value, which must be a slice, into +// ptrSlice, which must be a pointer to a slice. newValue() can be use to +// create the slice. +func unmarshalSlice(dec *json.Decoder, ptrSlice reflect.Value) error { + if ptrSlice.Kind() != reflect.Ptr || ptrSlice.Elem().Kind() != reflect.Slice { + panic("unmarshalSlice called on non-*[]slice value") + } + sliceValueType := ptrSlice.Elem().Type().Elem() + walk := sliceWalk{ + dec: dec, + s: ptrSlice, + valueType: sliceValueType, + } + if err := walk.run(); err != nil { + return err + } + + return nil +} + +type sliceWalk struct { + dec *json.Decoder + s reflect.Value // *[]slice + valueType reflect.Type +} + +// run runs our decoder state machine. +func (s *sliceWalk) run() error { + var state = s.start + var err error + for { + state, err = state() + if err != nil { + return err + } + if state == nil { + return nil + } + } +} + +func (s *sliceWalk) start() (stateFn, error) { + // slices can have custom unmarshaler's. + if hasUnmarshalJSON(s.s) { + err := s.dec.Decode(s.s.Interface()) + if err != nil { + return nil, err + } + return nil, nil + } + + // We only want to use this if the slice value is: + // []*struct/[]struct/[]map/[]slice + // otherwise use standard decode + t := s.valueBaseType() + + switch t.Kind() { + case reflect.Ptr: + return nil, fmt.Errorf("cannot unmarshal into a ** or *") + case reflect.Struct, reflect.Map, reflect.Slice: + delim, err := s.dec.Token() + if err != nil { + return nil, err + } + // This indicates the value was set to nil. + if delim == nil { + return nil, nil + } + if !delimIs(delim, '[') { + return nil, fmt.Errorf("Unmarshal expected opening [, received %v", delim) + } + return s.next, nil + } + + if err := s.dec.Decode(s.s.Interface()); err != nil { + return nil, err + } + return nil, nil +} + +func (s *sliceWalk) next() (stateFn, error) { + if s.dec.More() { + return s.storeValue, nil + } + // Nothing left in the slice, remove closing ] + _, err := s.dec.Token() + return nil, err +} + +func (s *sliceWalk) storeValue() (stateFn, error) { + t := s.valueBaseType() + switch t.Kind() { + case reflect.Ptr: + return nil, fmt.Errorf("do not support 'pointer to pointer' or 'pointer to reference' types") + case reflect.Struct: + return s.storeStruct, nil + case reflect.Map: + return s.storeMap, nil + case reflect.Slice: + return s.storeSlice, nil + } + return nil, fmt.Errorf("bug: sliceWalk.storeValue() called on unsupported type: %v", t.Kind()) +} + +func (s *sliceWalk) storeStruct() (stateFn, error) { + v := newValue(s.valueType) + if err := unmarshalStruct(s.dec, v.Interface()); err != nil { + return nil, err + } + + if s.valueType.Kind() == reflect.Ptr { + s.s.Elem().Set(reflect.Append(s.s.Elem(), v)) + return s.next, nil + } + + s.s.Elem().Set(reflect.Append(s.s.Elem(), v.Elem())) + return s.next, nil +} + +func (s *sliceWalk) storeMap() (stateFn, error) { + v := reflect.MakeMap(s.valueType) + ptr := newValue(v.Type()) + ptr.Elem().Set(v) + + if err := unmarshalMap(s.dec, ptr); err != nil { + return nil, err + } + + s.s.Elem().Set(reflect.Append(s.s.Elem(), v)) + + return s.next, nil +} + +func (s *sliceWalk) storeSlice() (stateFn, error) { + v := newValue(s.valueType) + if err := unmarshalSlice(s.dec, v); err != nil { + return nil, err + } + + s.s.Elem().Set(reflect.Append(s.s.Elem(), v.Elem())) + + 
return s.next, nil +} + +// valueType returns the underlying Type. So a *struct would yield +// struct, etc... +func (s *sliceWalk) valueBaseType() reflect.Type { + v := s.valueType + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + return v +} + +// newValue() returns a new *type that represents type passed. +func newValue(valueType reflect.Type) reflect.Value { + if valueType.Kind() == reflect.Ptr { + return reflect.New(valueType.Elem()) + } + return reflect.New(valueType) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/marshal.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/marshal.go new file mode 100644 index 00000000..df5dc6e1 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/marshal.go @@ -0,0 +1,346 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package json + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "unicode" +) + +// marshalStruct takes in i, which must be a *struct or struct and marshals its content +// as JSON into buff (sometimes with writes to buff directly, sometimes via enc). +// This call is recursive for all fields of *struct or struct type. +func marshalStruct(v reflect.Value, buff *bytes.Buffer, enc *json.Encoder) error { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + // We only care about custom Marshalling a struct. + if v.Kind() != reflect.Struct { + return fmt.Errorf("bug: marshal() received a non *struct or struct, received type %T", v.Interface()) + } + + if hasMarshalJSON(v) { + b, err := callMarshalJSON(v) + if err != nil { + return err + } + buff.Write(b) + return nil + } + + t := v.Type() + + // If it has an AdditionalFields field make sure its the right type. + f := v.FieldByName(addField) + if f.Kind() != reflect.Invalid { + if f.Kind() != reflect.Map { + return fmt.Errorf("type %T has field 'AdditionalFields' that is not a map[string]interface{}", v.Interface()) + } + if !f.Type().AssignableTo(mapStrInterType) { + return fmt.Errorf("type %T has field 'AdditionalFields' that is not a map[string]interface{}", v.Interface()) + } + } + + translator, err := findFields(v) + if err != nil { + return err + } + + buff.WriteByte(leftBrace) + for x := 0; x < v.NumField(); x++ { + field := v.Field(x) + + // We don't access private fields. + if unicode.IsLower(rune(t.Field(x).Name[0])) { + continue + } + + if t.Field(x).Name == addField { + if v.Field(x).Len() > 0 { + if err := writeAddFields(field.Interface(), buff, enc); err != nil { + return err + } + buff.WriteByte(comma) + } + continue + } + + // If they have omitempty set, we don't write out the field if + // it is the zero value. + if hasOmitEmpty(t.Field(x).Tag.Get("json")) { + if v.Field(x).IsZero() { + continue + } + } + + // Write out the field name part. + jsonName := translator.jsonName(t.Field(x).Name) + buff.WriteString(fmt.Sprintf("%q:", jsonName)) + + if field.Kind() == reflect.Ptr { + field = field.Elem() + } + + if err := marshalStructField(field, buff, enc); err != nil { + return err + } + } + + buff.Truncate(buff.Len() - 1) // Remove final comma + buff.WriteByte(rightBrace) + + return nil +} + +func marshalStructField(field reflect.Value, buff *bytes.Buffer, enc *json.Encoder) error { + // Determine if we need a trailing comma. + defer buff.WriteByte(comma) + + switch field.Kind() { + // If it was a *struct or struct, we need to recursively all marshal(). 
+ case reflect.Struct: + if field.CanAddr() { + field = field.Addr() + } + return marshalStruct(field, buff, enc) + case reflect.Map: + return marshalMap(field, buff, enc) + case reflect.Slice: + return marshalSlice(field, buff, enc) + } + + // It is just a basic type, so encode it. + if err := enc.Encode(field.Interface()); err != nil { + return err + } + buff.Truncate(buff.Len() - 1) // Remove Encode() added \n + + return nil +} + +func marshalMap(v reflect.Value, buff *bytes.Buffer, enc *json.Encoder) error { + if v.Kind() != reflect.Map { + return fmt.Errorf("bug: marshalMap() called on %T", v.Interface()) + } + if v.Len() == 0 { + buff.WriteByte(leftBrace) + buff.WriteByte(rightBrace) + return nil + } + encoder := mapEncode{m: v, buff: buff, enc: enc} + return encoder.run() +} + +type mapEncode struct { + m reflect.Value + buff *bytes.Buffer + enc *json.Encoder + + valueBaseType reflect.Type +} + +// run runs our encoder state machine. +func (m *mapEncode) run() error { + var state = m.start + var err error + for { + state, err = state() + if err != nil { + return err + } + if state == nil { + return nil + } + } +} + +func (m *mapEncode) start() (stateFn, error) { + if hasMarshalJSON(m.m) { + b, err := callMarshalJSON(m.m) + if err != nil { + return nil, err + } + m.buff.Write(b) + return nil, nil + } + + valueBaseType := m.m.Type().Elem() + if valueBaseType.Kind() == reflect.Ptr { + valueBaseType = valueBaseType.Elem() + } + m.valueBaseType = valueBaseType + + switch valueBaseType.Kind() { + case reflect.Ptr: + return nil, fmt.Errorf("Marshal does not support ** or *") + case reflect.Struct, reflect.Map, reflect.Slice: + return m.encode, nil + } + + // If the map value doesn't have a struct/map/slice, just Encode() it. + if err := m.enc.Encode(m.m.Interface()); err != nil { + return nil, err + } + m.buff.Truncate(m.buff.Len() - 1) // Remove Encode() added \n + return nil, nil +} + +func (m *mapEncode) encode() (stateFn, error) { + m.buff.WriteByte(leftBrace) + + iter := m.m.MapRange() + for iter.Next() { + // Write the key. + k := iter.Key() + m.buff.WriteString(fmt.Sprintf("%q:", k.String())) + + v := iter.Value() + switch m.valueBaseType.Kind() { + case reflect.Struct: + if v.CanAddr() { + v = v.Addr() + } + if err := marshalStruct(v, m.buff, m.enc); err != nil { + return nil, err + } + case reflect.Map: + if err := marshalMap(v, m.buff, m.enc); err != nil { + return nil, err + } + case reflect.Slice: + if err := marshalSlice(v, m.buff, m.enc); err != nil { + return nil, err + } + default: + panic(fmt.Sprintf("critical bug: mapEncode.encode() called with value base type: %v", m.valueBaseType.Kind())) + } + m.buff.WriteByte(comma) + } + m.buff.Truncate(m.buff.Len() - 1) // Remove final comma + m.buff.WriteByte(rightBrace) + + return nil, nil +} + +func marshalSlice(v reflect.Value, buff *bytes.Buffer, enc *json.Encoder) error { + if v.Kind() != reflect.Slice { + return fmt.Errorf("bug: marshalSlice() called on %T", v.Interface()) + } + if v.Len() == 0 { + buff.WriteByte(leftParen) + buff.WriteByte(rightParen) + return nil + } + encoder := sliceEncode{s: v, buff: buff, enc: enc} + return encoder.run() +} + +type sliceEncode struct { + s reflect.Value + buff *bytes.Buffer + enc *json.Encoder + + valueBaseType reflect.Type +} + +// run runs our encoder state machine. 
+func (s *sliceEncode) run() error { + var state = s.start + var err error + for { + state, err = state() + if err != nil { + return err + } + if state == nil { + return nil + } + } +} + +func (s *sliceEncode) start() (stateFn, error) { + if hasMarshalJSON(s.s) { + b, err := callMarshalJSON(s.s) + if err != nil { + return nil, err + } + s.buff.Write(b) + return nil, nil + } + + valueBaseType := s.s.Type().Elem() + if valueBaseType.Kind() == reflect.Ptr { + valueBaseType = valueBaseType.Elem() + } + s.valueBaseType = valueBaseType + + switch valueBaseType.Kind() { + case reflect.Ptr: + return nil, fmt.Errorf("Marshal does not support ** or *") + case reflect.Struct, reflect.Map, reflect.Slice: + return s.encode, nil + } + + // If the map value doesn't have a struct/map/slice, just Encode() it. + if err := s.enc.Encode(s.s.Interface()); err != nil { + return nil, err + } + s.buff.Truncate(s.buff.Len() - 1) // Remove Encode added \n + + return nil, nil +} + +func (s *sliceEncode) encode() (stateFn, error) { + s.buff.WriteByte(leftParen) + for i := 0; i < s.s.Len(); i++ { + v := s.s.Index(i) + switch s.valueBaseType.Kind() { + case reflect.Struct: + if v.CanAddr() { + v = v.Addr() + } + if err := marshalStruct(v, s.buff, s.enc); err != nil { + return nil, err + } + case reflect.Map: + if err := marshalMap(v, s.buff, s.enc); err != nil { + return nil, err + } + case reflect.Slice: + if err := marshalSlice(v, s.buff, s.enc); err != nil { + return nil, err + } + default: + panic(fmt.Sprintf("critical bug: mapEncode.encode() called with value base type: %v", s.valueBaseType.Kind())) + } + s.buff.WriteByte(comma) + } + s.buff.Truncate(s.buff.Len() - 1) // Remove final comma + s.buff.WriteByte(rightParen) + return nil, nil +} + +// writeAddFields writes the AdditionalFields struct field out to JSON as field +// values. i must be a map[string]interface{} or this will panic. +func writeAddFields(i interface{}, buff *bytes.Buffer, enc *json.Encoder) error { + m := i.(map[string]interface{}) + + x := 0 + for k, v := range m { + buff.WriteString(fmt.Sprintf("%q:", k)) + if err := enc.Encode(v); err != nil { + return err + } + buff.Truncate(buff.Len() - 1) // Remove Encode() added \n + + if x+1 != len(m) { + buff.WriteByte(comma) + } + x++ + } + return nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/struct.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/struct.go new file mode 100644 index 00000000..07751544 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/struct.go @@ -0,0 +1,290 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package json + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" +) + +func unmarshalStruct(jdec *json.Decoder, i interface{}) error { + v := reflect.ValueOf(i) + if v.Kind() != reflect.Ptr { + return fmt.Errorf("Unmarshal() received type %T, which is not a *struct", i) + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return fmt.Errorf("Unmarshal() received type %T, which is not a *struct", i) + } + + if hasUnmarshalJSON(v) { + // Indicates that this type has a custom Unmarshaler. 
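+		// (for example, a type that wraps time.Time and implements json.Unmarshaler is handed
+		// straight to encoding/json here rather than going through the field walk below)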
+ return jdec.Decode(v.Addr().Interface()) + } + + f := v.FieldByName(addField) + if f.Kind() == reflect.Invalid { + return fmt.Errorf("Unmarshal(%T) only supports structs that have the field AdditionalFields or implements json.Unmarshaler", i) + } + + if f.Kind() != reflect.Map || !f.Type().AssignableTo(mapStrInterType) { + return fmt.Errorf("type %T has field 'AdditionalFields' that is not a map[string]interface{}", i) + } + + dec := newDecoder(jdec, v) + return dec.run() +} + +type decoder struct { + dec *json.Decoder + value reflect.Value // This will be a reflect.Struct + translator translateFields + key string +} + +func newDecoder(dec *json.Decoder, value reflect.Value) *decoder { + return &decoder{value: value, dec: dec} +} + +// run runs our decoder state machine. +func (d *decoder) run() error { + var state = d.start + var err error + for { + state, err = state() + if err != nil { + return err + } + if state == nil { + return nil + } + } +} + +// start looks for our opening delimeter '{' and then transitions to looping through our fields. +func (d *decoder) start() (stateFn, error) { + var err error + d.translator, err = findFields(d.value) + if err != nil { + return nil, err + } + + delim, err := d.dec.Token() + if err != nil { + return nil, err + } + if !delimIs(delim, '{') { + return nil, fmt.Errorf("Unmarshal expected opening {, received %v", delim) + } + + return d.next, nil +} + +// next gets the next struct field name from the raw json or stops the machine if we get our closing }. +func (d *decoder) next() (stateFn, error) { + if !d.dec.More() { + // Remove the closing }. + if _, err := d.dec.Token(); err != nil { + return nil, err + } + return nil, nil + } + + key, err := d.dec.Token() + if err != nil { + return nil, err + } + + d.key = key.(string) + return d.storeValue, nil +} + +// storeValue takes the next value and stores it our struct. If the field can't be found +// in the struct, it pushes the operation to storeAdditional(). +func (d *decoder) storeValue() (stateFn, error) { + goName := d.translator.goName(d.key) + if goName == "" { + goName = d.key + } + + // We don't have the field in the struct, so it goes in AdditionalFields. + f := d.value.FieldByName(goName) + if f.Kind() == reflect.Invalid { + return d.storeAdditional, nil + } + + // Indicates that this type has a custom Unmarshaler. + if hasUnmarshalJSON(f) { + err := d.dec.Decode(f.Addr().Interface()) + if err != nil { + return nil, err + } + return d.next, nil + } + + t, isPtr, err := fieldBaseType(d.value, goName) + if err != nil { + return nil, fmt.Errorf("type(%s) had field(%s) %w", d.value.Type().Name(), goName, err) + } + + switch t.Kind() { + // We need to recursively call ourselves on any *struct or struct. + case reflect.Struct: + if isPtr { + if f.IsNil() { + f.Set(reflect.New(t)) + } + } else { + f = f.Addr() + } + if err := unmarshalStruct(d.dec, f.Interface()); err != nil { + return nil, err + } + return d.next, nil + case reflect.Map: + v := reflect.MakeMap(f.Type()) + ptr := newValue(f.Type()) + ptr.Elem().Set(v) + if err := unmarshalMap(d.dec, ptr); err != nil { + return nil, err + } + f.Set(ptr.Elem()) + return d.next, nil + case reflect.Slice: + v := reflect.MakeSlice(f.Type(), 0, 0) + ptr := newValue(f.Type()) + ptr.Elem().Set(v) + if err := unmarshalSlice(d.dec, ptr); err != nil { + return nil, err + } + f.Set(ptr.Elem()) + return d.next, nil + } + + if !isPtr { + f = f.Addr() + } + + // For values that are pointers, we need them to be non-nil in order + // to decode into them. 
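+	// (for example, a nil *string field is allocated here first so the decoder has somewhere
+	// to write; non-pointer fields were made addressable above and can never be nil)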
+ if f.IsNil() { + f.Set(reflect.New(t)) + } + + if err := d.dec.Decode(f.Interface()); err != nil { + return nil, err + } + + return d.next, nil +} + +// storeAdditional pushes the key/value into our .AdditionalFields map. +func (d *decoder) storeAdditional() (stateFn, error) { + rw := json.RawMessage{} + if err := d.dec.Decode(&rw); err != nil { + return nil, err + } + field := d.value.FieldByName(addField) + if field.IsNil() { + field.Set(reflect.MakeMap(field.Type())) + } + field.SetMapIndex(reflect.ValueOf(d.key), reflect.ValueOf(rw)) + return d.next, nil +} + +func fieldBaseType(v reflect.Value, fieldName string) (t reflect.Type, isPtr bool, err error) { + sf, ok := v.Type().FieldByName(fieldName) + if !ok { + return nil, false, fmt.Errorf("bug: fieldBaseType() lookup of field(%s) on type(%s): do not have field", fieldName, v.Type().Name()) + } + t = sf.Type + if t.Kind() == reflect.Ptr { + t = t.Elem() + isPtr = true + } + if t.Kind() == reflect.Ptr { + return nil, isPtr, fmt.Errorf("received pointer to pointer type, not supported") + } + return t, isPtr, nil +} + +type translateField struct { + jsonName string + goName string +} + +// translateFields is a list of translateFields with a handy lookup method. +type translateFields []translateField + +// goName loops through a list of fields looking for one contaning the jsonName and +// returning the goName. If not found, returns the empty string. +// Note: not a map because at this size slices are faster even in tight loops. +func (t translateFields) goName(jsonName string) string { + for _, entry := range t { + if entry.jsonName == jsonName { + return entry.goName + } + } + return "" +} + +// jsonName loops through a list of fields looking for one contaning the goName and +// returning the jsonName. If not found, returns the empty string. +// Note: not a map because at this size slices are faster even in tight loops. +func (t translateFields) jsonName(goName string) string { + for _, entry := range t { + if entry.goName == goName { + return entry.jsonName + } + } + return "" +} + +var umarshalerType = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + +// findFields parses a struct and writes the field tags for lookup. It will return an error +// if any field has a type of *struct or struct that does not implement json.Marshaler. +func findFields(v reflect.Value) (translateFields, error) { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if v.Kind() != reflect.Struct { + return nil, fmt.Errorf("findFields received a %s type, expected *struct or struct", v.Type().Name()) + } + tfs := make([]translateField, 0, v.NumField()) + for i := 0; i < v.NumField(); i++ { + tf := translateField{ + goName: v.Type().Field(i).Name, + jsonName: parseTag(v.Type().Field(i).Tag.Get("json")), + } + switch tf.jsonName { + case "", "-": + tf.jsonName = tf.goName + } + tfs = append(tfs, tf) + + f := v.Field(i) + if f.Kind() == reflect.Ptr { + f = f.Elem() + } + if f.Kind() == reflect.Struct { + if f.Type().Implements(umarshalerType) { + return nil, fmt.Errorf("struct type %q which has field %q which "+ + "doesn't implement json.Unmarshaler", v.Type().Name(), v.Type().Field(i).Name) + } + } + } + return tfs, nil +} + +// parseTag just returns the first entry in the tag. tag is the string +// returned by reflect.StructField.Tag().Get(). 
+func parseTag(tag string) string { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx] + } + return tag +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time/time.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time/time.go new file mode 100644 index 00000000..a1c99621 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time/time.go @@ -0,0 +1,70 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package time provides for custom types to translate time from JSON and other formats +// into time.Time objects. +package time + +import ( + "fmt" + "strconv" + "strings" + "time" +) + +// Unix provides a type that can marshal and unmarshal a string representation +// of the unix epoch into a time.Time object. +type Unix struct { + T time.Time +} + +// MarshalJSON implements encoding/json.MarshalJSON(). +func (u Unix) MarshalJSON() ([]byte, error) { + if u.T.IsZero() { + return []byte(""), nil + } + return []byte(fmt.Sprintf("%q", strconv.FormatInt(u.T.Unix(), 10))), nil +} + +// UnmarshalJSON implements encoding/json.UnmarshalJSON(). +func (u *Unix) UnmarshalJSON(b []byte) error { + i, err := strconv.Atoi(strings.Trim(string(b), `"`)) + if err != nil { + return fmt.Errorf("unix time(%s) could not be converted from string to int: %w", string(b), err) + } + u.T = time.Unix(int64(i), 0) + return nil +} + +// DurationTime provides a type that can marshal and unmarshal a string representation +// of a duration from now into a time.Time object. +// Note: I'm not sure this is the best way to do this. What happens is we get a field +// called "expires_in" that represents the seconds from now that this expires. We +// turn that into a time we call .ExpiresOn. But maybe we should be recording +// when the token was received at .TokenRecieved and .ExpiresIn should remain as a duration. +// Then we could have a method called ExpiresOn(). Honestly, the whole thing is +// bad because the server doesn't return a concrete time. I think this is +// cleaner, but its not great either. +type DurationTime struct { + T time.Time +} + +// MarshalJSON implements encoding/json.MarshalJSON(). +func (d DurationTime) MarshalJSON() ([]byte, error) { + if d.T.IsZero() { + return []byte(""), nil + } + + dt := time.Until(d.T) + return []byte(fmt.Sprintf("%d", int64(dt*time.Second))), nil +} + +// UnmarshalJSON implements encoding/json.UnmarshalJSON(). +func (d *DurationTime) UnmarshalJSON(b []byte) error { + i, err := strconv.Atoi(strings.Trim(string(b), `"`)) + if err != nil { + return fmt.Errorf("unix time(%s) could not be converted from string to int: %w", string(b), err) + } + d.T = time.Now().Add(time.Duration(i) * time.Second) + return nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go new file mode 100644 index 00000000..04236ff3 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go @@ -0,0 +1,177 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package local contains a local HTTP server used with interactive authentication. 
+package local
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+)
+
+var okPage = []byte(`
+<!DOCTYPE html>
+<html>
+<head>
+	<title>Authentication Complete</title>
+</head>
+<body>
+	<p>Authentication complete. You can return to the application. Feel free to close this browser tab.</p>
+</body>
+</html>
+`)
+
+const failPage = `
+<!DOCTYPE html>
+<html>
+<head>
+	<title>Authentication Failed</title>
+</head>
+<body>
+	<p>Authentication failed. You can return to the application. Feel free to close this browser tab.</p>
+	<p>Error details: error %s error_description: %s</p>
+</body>
+</html>
      + + +` + +// Result is the result from the redirect. +type Result struct { + // Code is the code sent by the authority server. + Code string + // Err is set if there was an error. + Err error +} + +// Server is an HTTP server. +type Server struct { + // Addr is the address the server is listening on. + Addr string + resultCh chan Result + s *http.Server + reqState string +} + +// New creates a local HTTP server and starts it. +func New(reqState string, port int) (*Server, error) { + var l net.Listener + var err error + var portStr string + if port > 0 { + // use port provided by caller + l, err = net.Listen("tcp", fmt.Sprintf("localhost:%d", port)) + portStr = strconv.FormatInt(int64(port), 10) + } else { + // find a free port + for i := 0; i < 10; i++ { + l, err = net.Listen("tcp", "localhost:0") + if err != nil { + continue + } + addr := l.Addr().String() + portStr = addr[strings.LastIndex(addr, ":")+1:] + break + } + } + if err != nil { + return nil, err + } + + serv := &Server{ + Addr: fmt.Sprintf("http://localhost:%s", portStr), + s: &http.Server{Addr: "localhost:0", ReadHeaderTimeout: time.Second}, + reqState: reqState, + resultCh: make(chan Result, 1), + } + serv.s.Handler = http.HandlerFunc(serv.handler) + + if err := serv.start(l); err != nil { + return nil, err + } + + return serv, nil +} + +func (s *Server) start(l net.Listener) error { + go func() { + err := s.s.Serve(l) + if err != nil { + select { + case s.resultCh <- Result{Err: err}: + default: + } + } + }() + + return nil +} + +// Result gets the result of the redirect operation. Once a single result is returned, the server +// is shutdown. ctx deadline will be honored. +func (s *Server) Result(ctx context.Context) Result { + select { + case <-ctx.Done(): + return Result{Err: ctx.Err()} + case r := <-s.resultCh: + return r + } +} + +// Shutdown shuts down the server. +func (s *Server) Shutdown() { + // Note: You might get clever and think you can do this in handler() as a defer, you can't. + _ = s.s.Shutdown(context.Background()) +} + +func (s *Server) putResult(r Result) { + select { + case s.resultCh <- r: + default: + } +} + +func (s *Server) handler(w http.ResponseWriter, r *http.Request) { + q := r.URL.Query() + + headerErr := q.Get("error") + if headerErr != "" { + desc := q.Get("error_description") + // Note: It is a little weird we handle some errors by not going to the failPage. If they all should, + // change this to s.error() and make s.error() write the failPage instead of an error code. + _, _ = w.Write([]byte(fmt.Sprintf(failPage, headerErr, desc))) + s.putResult(Result{Err: fmt.Errorf(desc)}) + return + } + + respState := q.Get("state") + switch respState { + case s.reqState: + case "": + s.error(w, http.StatusInternalServerError, "server didn't send OAuth state") + return + default: + s.error(w, http.StatusInternalServerError, "mismatched OAuth state, req(%s), resp(%s)", s.reqState, respState) + return + } + + code := q.Get("code") + if code == "" { + s.error(w, http.StatusInternalServerError, "authorization code missing in query string") + return + } + + _, _ = w.Write(okPage) + s.putResult(Result{Code: code}) +} + +func (s *Server) error(w http.ResponseWriter, code int, str string, i ...interface{}) { + err := fmt.Errorf(str, i...) 
+ http.Error(w, err.Error(), code) + s.putResult(Result{Err: err}) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go new file mode 100644 index 00000000..ebd86e2b --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go @@ -0,0 +1,353 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package oauth + +import ( + "context" + "encoding/json" + "fmt" + "io" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported" + internalTime "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs" + "github.com/google/uuid" +) + +// ResolveEndpointer contains the methods for resolving authority endpoints. +type ResolveEndpointer interface { + ResolveEndpoints(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (authority.Endpoints, error) +} + +// AccessTokens contains the methods for fetching tokens from different sources. +type AccessTokens interface { + DeviceCodeResult(ctx context.Context, authParameters authority.AuthParams) (accesstokens.DeviceCodeResult, error) + FromUsernamePassword(ctx context.Context, authParameters authority.AuthParams) (accesstokens.TokenResponse, error) + FromAuthCode(ctx context.Context, req accesstokens.AuthCodeRequest) (accesstokens.TokenResponse, error) + FromRefreshToken(ctx context.Context, appType accesstokens.AppType, authParams authority.AuthParams, cc *accesstokens.Credential, refreshToken string) (accesstokens.TokenResponse, error) + FromClientSecret(ctx context.Context, authParameters authority.AuthParams, clientSecret string) (accesstokens.TokenResponse, error) + FromAssertion(ctx context.Context, authParameters authority.AuthParams, assertion string) (accesstokens.TokenResponse, error) + FromUserAssertionClientSecret(ctx context.Context, authParameters authority.AuthParams, userAssertion string, clientSecret string) (accesstokens.TokenResponse, error) + FromUserAssertionClientCertificate(ctx context.Context, authParameters authority.AuthParams, userAssertion string, assertion string) (accesstokens.TokenResponse, error) + FromDeviceCodeResult(ctx context.Context, authParameters authority.AuthParams, deviceCodeResult accesstokens.DeviceCodeResult) (accesstokens.TokenResponse, error) + FromSamlGrant(ctx context.Context, authParameters authority.AuthParams, samlGrant wstrust.SamlTokenInfo) (accesstokens.TokenResponse, error) +} + +// FetchAuthority will be implemented by authority.Authority. +type FetchAuthority interface { + UserRealm(context.Context, authority.AuthParams) (authority.UserRealm, error) + AADInstanceDiscovery(context.Context, authority.Info) (authority.InstanceDiscoveryResponse, error) +} + +// FetchWSTrust contains the methods for interacting with WSTrust endpoints. 
+type FetchWSTrust interface { + Mex(ctx context.Context, federationMetadataURL string) (defs.MexDocument, error) + SAMLTokenInfo(ctx context.Context, authParameters authority.AuthParams, cloudAudienceURN string, endpoint defs.Endpoint) (wstrust.SamlTokenInfo, error) +} + +// Client provides tokens for various types of token requests. +type Client struct { + Resolver ResolveEndpointer + AccessTokens AccessTokens + Authority FetchAuthority + WSTrust FetchWSTrust +} + +// New is the constructor for Token. +func New(httpClient ops.HTTPClient) *Client { + r := ops.New(httpClient) + return &Client{ + Resolver: newAuthorityEndpoint(r), + AccessTokens: r.AccessTokens(), + Authority: r.Authority(), + WSTrust: r.WSTrust(), + } +} + +// ResolveEndpoints gets the authorization and token endpoints and creates an AuthorityEndpoints instance. +func (t *Client) ResolveEndpoints(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (authority.Endpoints, error) { + return t.Resolver.ResolveEndpoints(ctx, authorityInfo, userPrincipalName) +} + +// AADInstanceDiscovery attempts to discover a tenant endpoint (used in OIDC auth with an authorization endpoint). +// This is done by AAD which allows for aliasing of tenants (windows.sts.net is the same as login.windows.com). +func (t *Client) AADInstanceDiscovery(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryResponse, error) { + return t.Authority.AADInstanceDiscovery(ctx, authorityInfo) +} + +// AuthCode returns a token based on an authorization code. +func (t *Client) AuthCode(ctx context.Context, req accesstokens.AuthCodeRequest) (accesstokens.TokenResponse, error) { + if err := scopeError(req.AuthParams); err != nil { + return accesstokens.TokenResponse{}, err + } + if err := t.resolveEndpoint(ctx, &req.AuthParams, ""); err != nil { + return accesstokens.TokenResponse{}, err + } + + tResp, err := t.AccessTokens.FromAuthCode(ctx, req) + if err != nil { + return accesstokens.TokenResponse{}, fmt.Errorf("could not retrieve token from auth code: %w", err) + } + return tResp, nil +} + +// Credential acquires a token from the authority using a client credentials grant. 
+func (t *Client) Credential(ctx context.Context, authParams authority.AuthParams, cred *accesstokens.Credential) (accesstokens.TokenResponse, error) { + if cred.TokenProvider != nil { + now := time.Now() + scopes := make([]string, len(authParams.Scopes)) + copy(scopes, authParams.Scopes) + params := exported.TokenProviderParameters{ + Claims: authParams.Claims, + CorrelationID: uuid.New().String(), + Scopes: scopes, + TenantID: authParams.AuthorityInfo.Tenant, + } + tr, err := cred.TokenProvider(ctx, params) + if err != nil { + if len(scopes) == 0 { + err = fmt.Errorf("token request had an empty authority.AuthParams.Scopes, which may cause the following error: %w", err) + return accesstokens.TokenResponse{}, err + } + return accesstokens.TokenResponse{}, err + } + return accesstokens.TokenResponse{ + AccessToken: tr.AccessToken, + ExpiresOn: internalTime.DurationTime{ + T: now.Add(time.Duration(tr.ExpiresInSeconds) * time.Second), + }, + GrantedScopes: accesstokens.Scopes{Slice: authParams.Scopes}, + }, nil + } + + if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil { + return accesstokens.TokenResponse{}, err + } + + if cred.Secret != "" { + return t.AccessTokens.FromClientSecret(ctx, authParams, cred.Secret) + } + jwt, err := cred.JWT(ctx, authParams) + if err != nil { + return accesstokens.TokenResponse{}, err + } + return t.AccessTokens.FromAssertion(ctx, authParams, jwt) +} + +// Credential acquires a token from the authority using a client credentials grant. +func (t *Client) OnBehalfOf(ctx context.Context, authParams authority.AuthParams, cred *accesstokens.Credential) (accesstokens.TokenResponse, error) { + if err := scopeError(authParams); err != nil { + return accesstokens.TokenResponse{}, err + } + if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil { + return accesstokens.TokenResponse{}, err + } + + if cred.Secret != "" { + return t.AccessTokens.FromUserAssertionClientSecret(ctx, authParams, authParams.UserAssertion, cred.Secret) + } + jwt, err := cred.JWT(ctx, authParams) + if err != nil { + return accesstokens.TokenResponse{}, err + } + tr, err := t.AccessTokens.FromUserAssertionClientCertificate(ctx, authParams, authParams.UserAssertion, jwt) + if err != nil { + return accesstokens.TokenResponse{}, err + } + return tr, nil +} + +func (t *Client) Refresh(ctx context.Context, reqType accesstokens.AppType, authParams authority.AuthParams, cc *accesstokens.Credential, refreshToken accesstokens.RefreshToken) (accesstokens.TokenResponse, error) { + if err := scopeError(authParams); err != nil { + return accesstokens.TokenResponse{}, err + } + if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil { + return accesstokens.TokenResponse{}, err + } + + tr, err := t.AccessTokens.FromRefreshToken(ctx, reqType, authParams, cc, refreshToken.Secret) + if err != nil { + return accesstokens.TokenResponse{}, err + } + return tr, nil +} + +// UsernamePassword retrieves a token where a username and password is used. However, if this is +// a user realm of "Federated", this uses SAML tokens. If "Managed", uses normal username/password. 
+func (t *Client) UsernamePassword(ctx context.Context, authParams authority.AuthParams) (accesstokens.TokenResponse, error) { + if err := scopeError(authParams); err != nil { + return accesstokens.TokenResponse{}, err + } + + if authParams.AuthorityInfo.AuthorityType == authority.ADFS { + if err := t.resolveEndpoint(ctx, &authParams, authParams.Username); err != nil { + return accesstokens.TokenResponse{}, err + } + return t.AccessTokens.FromUsernamePassword(ctx, authParams) + } + if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil { + return accesstokens.TokenResponse{}, err + } + + userRealm, err := t.Authority.UserRealm(ctx, authParams) + if err != nil { + return accesstokens.TokenResponse{}, fmt.Errorf("problem getting user realm from authority: %w", err) + } + + switch userRealm.AccountType { + case authority.Federated: + mexDoc, err := t.WSTrust.Mex(ctx, userRealm.FederationMetadataURL) + if err != nil { + err = fmt.Errorf("problem getting mex doc from federated url(%s): %w", userRealm.FederationMetadataURL, err) + return accesstokens.TokenResponse{}, err + } + + saml, err := t.WSTrust.SAMLTokenInfo(ctx, authParams, userRealm.CloudAudienceURN, mexDoc.UsernamePasswordEndpoint) + if err != nil { + err = fmt.Errorf("problem getting SAML token info: %w", err) + return accesstokens.TokenResponse{}, err + } + tr, err := t.AccessTokens.FromSamlGrant(ctx, authParams, saml) + if err != nil { + return accesstokens.TokenResponse{}, err + } + return tr, nil + case authority.Managed: + if len(authParams.Scopes) == 0 { + err = fmt.Errorf("token request had an empty authority.AuthParams.Scopes, which may cause the following error: %w", err) + return accesstokens.TokenResponse{}, err + } + return t.AccessTokens.FromUsernamePassword(ctx, authParams) + } + return accesstokens.TokenResponse{}, errors.New("unknown account type") +} + +// DeviceCode is the result of a call to Token.DeviceCode(). +type DeviceCode struct { + // Result is the device code result from the first call in the device code flow. This allows + // the caller to retrieve the displayed code that is used to authorize on the second device. + Result accesstokens.DeviceCodeResult + authParams authority.AuthParams + + accessTokens AccessTokens +} + +// Token returns a token AFTER the user uses the user code on the second device. This will block +// until either: (1) the code is input by the user and the service releases a token, (2) the token +// expires, (3) the Context passed to .DeviceCode() is cancelled or expires, (4) some other service +// error occurs. +func (d DeviceCode) Token(ctx context.Context) (accesstokens.TokenResponse, error) { + if d.accessTokens == nil { + return accesstokens.TokenResponse{}, fmt.Errorf("DeviceCode was either created outside its package or the creating method had an error. 
DeviceCode is not valid") + } + + var cancel context.CancelFunc + if deadline, ok := ctx.Deadline(); !ok || d.Result.ExpiresOn.Before(deadline) { + ctx, cancel = context.WithDeadline(ctx, d.Result.ExpiresOn) + } else { + ctx, cancel = context.WithCancel(ctx) + } + defer cancel() + + var interval = 50 * time.Millisecond + timer := time.NewTimer(interval) + defer timer.Stop() + + for { + timer.Reset(interval) + select { + case <-ctx.Done(): + return accesstokens.TokenResponse{}, ctx.Err() + case <-timer.C: + interval += interval * 2 + if interval > 5*time.Second { + interval = 5 * time.Second + } + } + + token, err := d.accessTokens.FromDeviceCodeResult(ctx, d.authParams, d.Result) + if err != nil && isWaitDeviceCodeErr(err) { + continue + } + return token, err // This handles if it was a non-wait error or success + } +} + +type deviceCodeError struct { + Error string `json:"error"` +} + +func isWaitDeviceCodeErr(err error) bool { + var c errors.CallErr + if !errors.As(err, &c) { + return false + } + if c.Resp.StatusCode != 400 { + return false + } + var dCErr deviceCodeError + defer c.Resp.Body.Close() + body, err := io.ReadAll(c.Resp.Body) + if err != nil { + return false + } + err = json.Unmarshal(body, &dCErr) + if err != nil { + return false + } + if dCErr.Error == "authorization_pending" || dCErr.Error == "slow_down" { + return true + } + return false +} + +// DeviceCode returns a DeviceCode object that can be used to get the code that must be entered on the second +// device and optionally the token once the code has been entered on the second device. +func (t *Client) DeviceCode(ctx context.Context, authParams authority.AuthParams) (DeviceCode, error) { + if err := scopeError(authParams); err != nil { + return DeviceCode{}, err + } + + if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil { + return DeviceCode{}, err + } + + dcr, err := t.AccessTokens.DeviceCodeResult(ctx, authParams) + if err != nil { + return DeviceCode{}, err + } + + return DeviceCode{Result: dcr, authParams: authParams, accessTokens: t.AccessTokens}, nil +} + +func (t *Client) resolveEndpoint(ctx context.Context, authParams *authority.AuthParams, userPrincipalName string) error { + endpoints, err := t.Resolver.ResolveEndpoints(ctx, authParams.AuthorityInfo, userPrincipalName) + if err != nil { + return fmt.Errorf("unable to resolve an endpoint: %s", err) + } + authParams.Endpoints = endpoints + return nil +} + +// scopeError takes an authority.AuthParams and returns an error +// if len(AuthParams.Scope) == 0. +func scopeError(a authority.AuthParams) error { + // TODO(someone): we could look deeper at the message to determine if + // it's a scope error, but this is a good start. + /* + {error":"invalid_scope","error_description":"AADSTS1002012: The provided value for scope + openid offline_access profile is not valid. 
Client credential flows must have a scope value + with /.default suffixed to the resource identifier (application ID URI)...} + */ + if len(a.Scopes) == 0 { + return fmt.Errorf("token request had an empty authority.AuthParams.Scopes, which is invalid") + } + return nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go new file mode 100644 index 00000000..fa6bb61c --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go @@ -0,0 +1,451 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/* +Package accesstokens exposes a REST client for querying backend systems to get various types of +access tokens (oauth) for use in authentication. + +These calls are of type "application/x-www-form-urlencoded". This means we use url.Values to +represent arguments and then encode them into the POST body message. We receive JSON in +return for the requests. The request definition is defined in https://tools.ietf.org/html/rfc7521#section-4.2 . +*/ +package accesstokens + +import ( + "context" + "crypto" + + /* #nosec */ + "crypto/sha1" + "crypto/x509" + "encoding/base64" + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust" + "github.com/golang-jwt/jwt/v4" + "github.com/google/uuid" +) + +const ( + grantType = "grant_type" + deviceCode = "device_code" + clientID = "client_id" + clientInfo = "client_info" + clientInfoVal = "1" + username = "username" + password = "password" +) + +//go:generate stringer -type=AppType + +// AppType is whether the authorization code flow is for a public or confidential client. +type AppType int8 + +const ( + // ATUnknown is the zero value when the type hasn't been set. + ATUnknown AppType = iota + // ATPublic indicates this if for the Public.Client. + ATPublic + // ATConfidential indicates this if for the Confidential.Client. + ATConfidential +) + +type urlFormCaller interface { + URLFormCall(ctx context.Context, endpoint string, qv url.Values, resp interface{}) error +} + +// DeviceCodeResponse represents the HTTP response received from the device code endpoint +type DeviceCodeResponse struct { + authority.OAuthResponseBase + + UserCode string `json:"user_code"` + DeviceCode string `json:"device_code"` + VerificationURL string `json:"verification_url"` + ExpiresIn int `json:"expires_in"` + Interval int `json:"interval"` + Message string `json:"message"` + + AdditionalFields map[string]interface{} +} + +// Convert converts the DeviceCodeResponse to a DeviceCodeResult +func (dcr DeviceCodeResponse) Convert(clientID string, scopes []string) DeviceCodeResult { + expiresOn := time.Now().UTC().Add(time.Duration(dcr.ExpiresIn) * time.Second) + return NewDeviceCodeResult(dcr.UserCode, dcr.DeviceCode, dcr.VerificationURL, expiresOn, dcr.Interval, dcr.Message, clientID, scopes) +} + +// Credential represents the credential used in confidential client flows. 
This can be either +// a Secret or Cert/Key. +type Credential struct { + // Secret contains the credential secret if we are doing auth by secret. + Secret string + + // Cert is the public certificate, if we're authenticating by certificate. + Cert *x509.Certificate + // Key is the private key for signing, if we're authenticating by certificate. + Key crypto.PrivateKey + // X5c is the JWT assertion's x5c header value, required for SN/I authentication. + X5c []string + + // AssertionCallback is a function provided by the application, if we're authenticating by assertion. + AssertionCallback func(context.Context, exported.AssertionRequestOptions) (string, error) + + // TokenProvider is a function provided by the application that implements custom authentication + // logic for a confidential client + TokenProvider func(context.Context, exported.TokenProviderParameters) (exported.TokenProviderResult, error) +} + +// JWT gets the jwt assertion when the credential is not using a secret. +func (c *Credential) JWT(ctx context.Context, authParams authority.AuthParams) (string, error) { + if c.AssertionCallback != nil { + options := exported.AssertionRequestOptions{ + ClientID: authParams.ClientID, + TokenEndpoint: authParams.Endpoints.TokenEndpoint, + } + return c.AssertionCallback(ctx, options) + } + + token := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{ + "aud": authParams.Endpoints.TokenEndpoint, + "exp": json.Number(strconv.FormatInt(time.Now().Add(10*time.Minute).Unix(), 10)), + "iss": authParams.ClientID, + "jti": uuid.New().String(), + "nbf": json.Number(strconv.FormatInt(time.Now().Unix(), 10)), + "sub": authParams.ClientID, + }) + token.Header = map[string]interface{}{ + "alg": "RS256", + "typ": "JWT", + "x5t": base64.StdEncoding.EncodeToString(thumbprint(c.Cert)), + } + + if authParams.SendX5C { + token.Header["x5c"] = c.X5c + } + + assertion, err := token.SignedString(c.Key) + if err != nil { + return "", fmt.Errorf("unable to sign a JWT token using private key: %w", err) + } + return assertion, nil +} + +// thumbprint runs the asn1.Der bytes through sha1 for use in the x5t parameter of JWT. +// https://tools.ietf.org/html/rfc7517#section-4.8 +func thumbprint(cert *x509.Certificate) []byte { + /* #nosec */ + a := sha1.Sum(cert.Raw) + return a[:] +} + +// Client represents the REST calls to get tokens from token generator backends. +type Client struct { + // Comm provides the HTTP transport client. + Comm urlFormCaller + + testing bool +} + +// FromUsernamePassword uses a username and password to get an access token. +func (c Client) FromUsernamePassword(ctx context.Context, authParameters authority.AuthParams) (TokenResponse, error) { + qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } + qv.Set(grantType, grant.Password) + qv.Set(username, authParameters.Username) + qv.Set(password, authParameters.Password) + qv.Set(clientID, authParameters.ClientID) + qv.Set(clientInfo, clientInfoVal) + addScopeQueryParam(qv, authParameters) + + return c.doTokenResp(ctx, authParameters, qv) +} + +// AuthCodeRequest stores the values required to request a token from the authority using an authorization code +type AuthCodeRequest struct { + AuthParams authority.AuthParams + Code string + CodeChallenge string + Credential *Credential + AppType AppType +} + +// NewCodeChallengeRequest returns an AuthCodeRequest that uses a code challenge.. 
+func NewCodeChallengeRequest(params authority.AuthParams, appType AppType, cc *Credential, code, challenge string) (AuthCodeRequest, error) { + if appType == ATUnknown { + return AuthCodeRequest{}, fmt.Errorf("bug: NewCodeChallengeRequest() called with AppType == ATUnknown") + } + return AuthCodeRequest{ + AuthParams: params, + AppType: appType, + Code: code, + CodeChallenge: challenge, + Credential: cc, + }, nil +} + +// FromAuthCode uses an authorization code to retrieve an access token. +func (c Client) FromAuthCode(ctx context.Context, req AuthCodeRequest) (TokenResponse, error) { + var qv url.Values + + switch req.AppType { + case ATUnknown: + return TokenResponse{}, fmt.Errorf("bug: Token.AuthCode() received request with AppType == ATUnknown") + case ATConfidential: + var err error + if req.Credential == nil { + return TokenResponse{}, fmt.Errorf("AuthCodeRequest had nil Credential for Confidential app") + } + qv, err = prepURLVals(ctx, req.Credential, req.AuthParams) + if err != nil { + return TokenResponse{}, err + } + case ATPublic: + qv = url.Values{} + default: + return TokenResponse{}, fmt.Errorf("bug: Token.AuthCode() received request with AppType == %v, which we do not recongnize", req.AppType) + } + + qv.Set(grantType, grant.AuthCode) + qv.Set("code", req.Code) + qv.Set("code_verifier", req.CodeChallenge) + qv.Set("redirect_uri", req.AuthParams.Redirecturi) + qv.Set(clientID, req.AuthParams.ClientID) + qv.Set(clientInfo, clientInfoVal) + addScopeQueryParam(qv, req.AuthParams) + if err := addClaims(qv, req.AuthParams); err != nil { + return TokenResponse{}, err + } + + return c.doTokenResp(ctx, req.AuthParams, qv) +} + +// FromRefreshToken uses a refresh token (for refreshing credentials) to get a new access token. +func (c Client) FromRefreshToken(ctx context.Context, appType AppType, authParams authority.AuthParams, cc *Credential, refreshToken string) (TokenResponse, error) { + qv := url.Values{} + if appType == ATConfidential { + var err error + qv, err = prepURLVals(ctx, cc, authParams) + if err != nil { + return TokenResponse{}, err + } + } + if err := addClaims(qv, authParams); err != nil { + return TokenResponse{}, err + } + qv.Set(grantType, grant.RefreshToken) + qv.Set(clientID, authParams.ClientID) + qv.Set(clientInfo, clientInfoVal) + qv.Set("refresh_token", refreshToken) + addScopeQueryParam(qv, authParams) + + return c.doTokenResp(ctx, authParams, qv) +} + +// FromClientSecret uses a client's secret (aka password) to get a new token. 
+func (c Client) FromClientSecret(ctx context.Context, authParameters authority.AuthParams, clientSecret string) (TokenResponse, error) { + qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } + qv.Set(grantType, grant.ClientCredential) + qv.Set("client_secret", clientSecret) + qv.Set(clientID, authParameters.ClientID) + addScopeQueryParam(qv, authParameters) + + token, err := c.doTokenResp(ctx, authParameters, qv) + if err != nil { + return token, fmt.Errorf("FromClientSecret(): %w", err) + } + return token, nil +} + +func (c Client) FromAssertion(ctx context.Context, authParameters authority.AuthParams, assertion string) (TokenResponse, error) { + qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } + qv.Set(grantType, grant.ClientCredential) + qv.Set("client_assertion_type", grant.ClientAssertion) + qv.Set("client_assertion", assertion) + qv.Set(clientID, authParameters.ClientID) + qv.Set(clientInfo, clientInfoVal) + addScopeQueryParam(qv, authParameters) + + token, err := c.doTokenResp(ctx, authParameters, qv) + if err != nil { + return token, fmt.Errorf("FromAssertion(): %w", err) + } + return token, nil +} + +func (c Client) FromUserAssertionClientSecret(ctx context.Context, authParameters authority.AuthParams, userAssertion string, clientSecret string) (TokenResponse, error) { + qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } + qv.Set(grantType, grant.JWT) + qv.Set(clientID, authParameters.ClientID) + qv.Set("client_secret", clientSecret) + qv.Set("assertion", userAssertion) + qv.Set(clientInfo, clientInfoVal) + qv.Set("requested_token_use", "on_behalf_of") + addScopeQueryParam(qv, authParameters) + + return c.doTokenResp(ctx, authParameters, qv) +} + +func (c Client) FromUserAssertionClientCertificate(ctx context.Context, authParameters authority.AuthParams, userAssertion string, assertion string) (TokenResponse, error) { + qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } + qv.Set(grantType, grant.JWT) + qv.Set("client_assertion_type", grant.ClientAssertion) + qv.Set("client_assertion", assertion) + qv.Set(clientID, authParameters.ClientID) + qv.Set("assertion", userAssertion) + qv.Set(clientInfo, clientInfoVal) + qv.Set("requested_token_use", "on_behalf_of") + addScopeQueryParam(qv, authParameters) + + return c.doTokenResp(ctx, authParameters, qv) +} + +func (c Client) DeviceCodeResult(ctx context.Context, authParameters authority.AuthParams) (DeviceCodeResult, error) { + qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return DeviceCodeResult{}, err + } + qv.Set(clientID, authParameters.ClientID) + addScopeQueryParam(qv, authParameters) + + endpoint := strings.Replace(authParameters.Endpoints.TokenEndpoint, "token", "devicecode", -1) + + resp := DeviceCodeResponse{} + err := c.Comm.URLFormCall(ctx, endpoint, qv, &resp) + if err != nil { + return DeviceCodeResult{}, err + } + + return resp.Convert(authParameters.ClientID, authParameters.Scopes), nil +} + +func (c Client) FromDeviceCodeResult(ctx context.Context, authParameters authority.AuthParams, deviceCodeResult DeviceCodeResult) (TokenResponse, error) { + qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } + qv.Set(grantType, grant.DeviceCode) + qv.Set(deviceCode, deviceCodeResult.DeviceCode) + qv.Set(clientID, 
authParameters.ClientID) + qv.Set(clientInfo, clientInfoVal) + addScopeQueryParam(qv, authParameters) + + return c.doTokenResp(ctx, authParameters, qv) +} + +func (c Client) FromSamlGrant(ctx context.Context, authParameters authority.AuthParams, samlGrant wstrust.SamlTokenInfo) (TokenResponse, error) { + qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } + qv.Set(username, authParameters.Username) + qv.Set(password, authParameters.Password) + qv.Set(clientID, authParameters.ClientID) + qv.Set(clientInfo, clientInfoVal) + qv.Set("assertion", base64.StdEncoding.WithPadding(base64.StdPadding).EncodeToString([]byte(samlGrant.Assertion))) + addScopeQueryParam(qv, authParameters) + + switch samlGrant.AssertionType { + case grant.SAMLV1: + qv.Set(grantType, grant.SAMLV1) + case grant.SAMLV2: + qv.Set(grantType, grant.SAMLV2) + default: + return TokenResponse{}, fmt.Errorf("GetAccessTokenFromSamlGrant returned unknown SAML assertion type: %q", samlGrant.AssertionType) + } + + return c.doTokenResp(ctx, authParameters, qv) +} + +func (c Client) doTokenResp(ctx context.Context, authParams authority.AuthParams, qv url.Values) (TokenResponse, error) { + resp := TokenResponse{} + err := c.Comm.URLFormCall(ctx, authParams.Endpoints.TokenEndpoint, qv, &resp) + if err != nil { + return resp, err + } + resp.ComputeScope(authParams) + if c.testing { + return resp, nil + } + return resp, resp.Validate() +} + +// prepURLVals returns an url.Values that sets various key/values if we are doing secrets +// or JWT assertions. +func prepURLVals(ctx context.Context, cc *Credential, authParams authority.AuthParams) (url.Values, error) { + params := url.Values{} + if cc.Secret != "" { + params.Set("client_secret", cc.Secret) + return params, nil + } + + jwt, err := cc.JWT(ctx, authParams) + if err != nil { + return nil, err + } + params.Set("client_assertion", jwt) + params.Set("client_assertion_type", grant.ClientAssertion) + return params, nil +} + +// openid required to get an id token +// offline_access required to get a refresh token +// profile required to get the client_info field back +var detectDefaultScopes = map[string]bool{ + "openid": true, + "offline_access": true, + "profile": true, +} + +var defaultScopes = []string{"openid", "offline_access", "profile"} + +func AppendDefaultScopes(authParameters authority.AuthParams) []string { + scopes := make([]string, 0, len(authParameters.Scopes)+len(defaultScopes)) + for _, scope := range authParameters.Scopes { + s := strings.TrimSpace(scope) + if s == "" { + continue + } + if detectDefaultScopes[scope] { + continue + } + scopes = append(scopes, scope) + } + scopes = append(scopes, defaultScopes...) 
+ return scopes +} + +// addClaims adds client capabilities and claims from AuthParams to the given url.Values +func addClaims(v url.Values, ap authority.AuthParams) error { + claims, err := ap.MergeCapabilitiesAndClaims() + if err == nil && claims != "" { + v.Set("claims", claims) + } + return err +} + +func addScopeQueryParam(queryParams url.Values, authParameters authority.AuthParams) { + scopes := AppendDefaultScopes(authParameters) + queryParams.Set("scope", strings.Join(scopes, " ")) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/apptype_string.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/apptype_string.go new file mode 100644 index 00000000..3bec4a67 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/apptype_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type=AppType"; DO NOT EDIT. + +package accesstokens + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[ATUnknown-0] + _ = x[ATPublic-1] + _ = x[ATConfidential-2] +} + +const _AppType_name = "ATUnknownATPublicATConfidential" + +var _AppType_index = [...]uint8{0, 9, 17, 31} + +func (i AppType) String() string { + if i < 0 || i >= AppType(len(_AppType_index)-1) { + return "AppType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _AppType_name[_AppType_index[i]:_AppType_index[i+1]] +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go new file mode 100644 index 00000000..b3892bf3 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go @@ -0,0 +1,335 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package accesstokens + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "reflect" + "strings" + "time" + + internalTime "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" +) + +// IDToken consists of all the information used to validate a user. +// https://docs.microsoft.com/azure/active-directory/develop/id-tokens . 
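AppendDefaultScopes above drops blank entries and the reserved openid, offline_access and profile scopes from the caller's list, then re-appends those three defaults so each appears exactly once in the request. A quick standalone illustration of the resulting scope parameter (the requested scopes are made up):

package main

import (
	"fmt"
	"strings"
)

func main() {
	defaults := []string{"openid", "offline_access", "profile"}
	requested := []string{"User.Read", " ", "openid"} // caller-supplied, possibly messy

	reserved := map[string]bool{"openid": true, "offline_access": true, "profile": true}
	scopes := make([]string, 0, len(requested)+len(defaults))
	for _, s := range requested {
		s = strings.TrimSpace(s)
		if s == "" || reserved[s] {
			continue // skip blanks and reserved scopes, as AppendDefaultScopes does
		}
		scopes = append(scopes, s)
	}
	scopes = append(scopes, defaults...)

	// The token request's "scope" parameter, as built by addScopeQueryParam.
	fmt.Println(strings.Join(scopes, " ")) // "User.Read openid offline_access profile"
}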
+type IDToken struct { + PreferredUsername string `json:"preferred_username,omitempty"` + GivenName string `json:"given_name,omitempty"` + FamilyName string `json:"family_name,omitempty"` + MiddleName string `json:"middle_name,omitempty"` + Name string `json:"name,omitempty"` + Oid string `json:"oid,omitempty"` + TenantID string `json:"tid,omitempty"` + Subject string `json:"sub,omitempty"` + UPN string `json:"upn,omitempty"` + Email string `json:"email,omitempty"` + AlternativeID string `json:"alternative_id,omitempty"` + Issuer string `json:"iss,omitempty"` + Audience string `json:"aud,omitempty"` + ExpirationTime int64 `json:"exp,omitempty"` + IssuedAt int64 `json:"iat,omitempty"` + NotBefore int64 `json:"nbf,omitempty"` + RawToken string + + AdditionalFields map[string]interface{} +} + +var null = []byte("null") + +// UnmarshalJSON implements json.Unmarshaler. +func (i *IDToken) UnmarshalJSON(b []byte) error { + if bytes.Equal(null, b) { + return nil + } + + // Because we have a custom unmarshaler, you + // cannot directly call json.Unmarshal here. If you do, it will call this function + // recursively until reach our recursion limit. We have to create a new type + // that doesn't have this method in order to use json.Unmarshal. + type idToken2 IDToken + + jwt := strings.Trim(string(b), `"`) + jwtArr := strings.Split(jwt, ".") + if len(jwtArr) < 2 { + return errors.New("IDToken returned from server is invalid") + } + + jwtPart := jwtArr[1] + jwtDecoded, err := decodeJWT(jwtPart) + if err != nil { + return fmt.Errorf("unable to unmarshal IDToken, problem decoding JWT: %w", err) + } + + token := idToken2{} + err = json.Unmarshal(jwtDecoded, &token) + if err != nil { + return fmt.Errorf("unable to unmarshal IDToken: %w", err) + } + token.RawToken = jwt + + *i = IDToken(token) + return nil +} + +// IsZero indicates if the IDToken is the zero value. +func (i IDToken) IsZero() bool { + v := reflect.ValueOf(i) + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + if !field.IsZero() { + switch field.Kind() { + case reflect.Map, reflect.Slice: + if field.Len() == 0 { + continue + } + } + return false + } + } + return true +} + +// LocalAccountID extracts an account's local account ID from an ID token. +func (i IDToken) LocalAccountID() string { + if i.Oid != "" { + return i.Oid + } + return i.Subject +} + +// jwtDecoder is provided to allow tests to provide their own. +var jwtDecoder = decodeJWT + +// ClientInfo is used to create a Home Account ID for an account. +type ClientInfo struct { + UID string `json:"uid"` + UTID string `json:"utid"` + + AdditionalFields map[string]interface{} +} + +// UnmarshalJSON implements json.Unmarshaler.s +func (c *ClientInfo) UnmarshalJSON(b []byte) error { + s := strings.Trim(string(b), `"`) + // Client info may be empty in some flows, e.g. certificate exchange. + if len(s) == 0 { + return nil + } + + // Because we have a custom unmarshaler, you + // cannot directly call json.Unmarshal here. If you do, it will call this function + // recursively until reach our recursion limit. We have to create a new type + // that doesn't have this method in order to use json.Unmarshal. 
+ type clientInfo2 ClientInfo + + raw, err := jwtDecoder(s) + if err != nil { + return fmt.Errorf("TokenResponse client_info field had JWT decode error: %w", err) + } + + var c2 clientInfo2 + + err = json.Unmarshal(raw, &c2) + if err != nil { + return fmt.Errorf("was unable to unmarshal decoded JWT in TokenRespone to ClientInfo: %w", err) + } + + *c = ClientInfo(c2) + return nil +} + +// HomeAccountID creates the home account ID. +func (c ClientInfo) HomeAccountID() string { + if c.UID == "" { + return "" + } else if c.UTID == "" { + return fmt.Sprintf("%s.%s", c.UID, c.UID) + } else { + return fmt.Sprintf("%s.%s", c.UID, c.UTID) + } +} + +// Scopes represents scopes in a TokenResponse. +type Scopes struct { + Slice []string +} + +// UnmarshalJSON implements json.Unmarshal. +func (s *Scopes) UnmarshalJSON(b []byte) error { + str := strings.Trim(string(b), `"`) + if len(str) == 0 { + return nil + } + sl := strings.Split(str, " ") + s.Slice = sl + return nil +} + +// TokenResponse is the information that is returned from a token endpoint during a token acquisition flow. +type TokenResponse struct { + authority.OAuthResponseBase + + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + + FamilyID string `json:"foci"` + IDToken IDToken `json:"id_token"` + ClientInfo ClientInfo `json:"client_info"` + ExpiresOn internalTime.DurationTime `json:"expires_in"` + ExtExpiresOn internalTime.DurationTime `json:"ext_expires_in"` + GrantedScopes Scopes `json:"scope"` + DeclinedScopes []string // This is derived + + AdditionalFields map[string]interface{} + + scopesComputed bool +} + +// ComputeScope computes the final scopes based on what was granted by the server and +// what our AuthParams were from the authority server. Per OAuth spec, if no scopes are returned, the response should be treated as if all scopes were granted +// This behavior can be observed in client assertion flows, but can happen at any time, this check ensures we treat +// those special responses properly Link to spec: https://tools.ietf.org/html/rfc6749#section-3.3 +func (tr *TokenResponse) ComputeScope(authParams authority.AuthParams) { + if len(tr.GrantedScopes.Slice) == 0 { + tr.GrantedScopes = Scopes{Slice: authParams.Scopes} + } else { + tr.DeclinedScopes = findDeclinedScopes(authParams.Scopes, tr.GrantedScopes.Slice) + } + tr.scopesComputed = true +} + +// Validate validates the TokenResponse has basic valid values. It must be called +// after ComputeScopes() is called. 
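Per ComputeScope above, an empty scope field in the token response means every requested scope was granted (RFC 6749, section 3.3); otherwise any requested scope missing from the grant is recorded as declined. A standalone mirror of that comparison with hypothetical scopes:

package main

import (
	"fmt"
	"strings"
)

// declinedScopes mirrors findDeclinedScopes: a case-insensitive set difference, requested minus granted.
func declinedScopes(requested, granted []string) []string {
	grantedSet := map[string]bool{}
	for _, s := range granted {
		grantedSet[strings.ToLower(s)] = true
	}
	var declined []string
	for _, r := range requested {
		if !grantedSet[strings.ToLower(r)] {
			declined = append(declined, r)
		}
	}
	return declined
}

func main() {
	requested := []string{"User.Read", "Mail.Send"}

	// Empty grant in the response: treat all requested scopes as granted.
	var granted []string
	if len(granted) == 0 {
		granted = requested
	}
	fmt.Println("granted:", granted, "declined:", declinedScopes(requested, granted))

	// Partial grant: the remainder is declined.
	granted = []string{"user.read"}
	fmt.Println("granted:", granted, "declined:", declinedScopes(requested, granted)) // declined: [Mail.Send]
}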
+func (tr *TokenResponse) Validate() error { + if tr.Error != "" { + return fmt.Errorf("%s: %s", tr.Error, tr.ErrorDescription) + } + + if tr.AccessToken == "" { + return errors.New("response is missing access_token") + } + + if !tr.scopesComputed { + return fmt.Errorf("TokenResponse hasn't had ScopesComputed() called") + } + return nil +} + +func (tr *TokenResponse) CacheKey(authParams authority.AuthParams) string { + if authParams.AuthorizationType == authority.ATOnBehalfOf { + return authParams.AssertionHash() + } + if authParams.AuthorizationType == authority.ATClientCredentials { + return authParams.AppKey() + } + if authParams.IsConfidentialClient || authParams.AuthorizationType == authority.ATRefreshToken { + return tr.ClientInfo.HomeAccountID() + } + return "" +} + +func findDeclinedScopes(requestedScopes []string, grantedScopes []string) []string { + declined := []string{} + grantedMap := map[string]bool{} + for _, s := range grantedScopes { + grantedMap[strings.ToLower(s)] = true + } + // Comparing the requested scopes with the granted scopes to see if there are any scopes that have been declined. + for _, r := range requestedScopes { + if !grantedMap[strings.ToLower(r)] { + declined = append(declined, r) + } + } + return declined +} + +// decodeJWT decodes a JWT and converts it to a byte array representing a JSON object +// JWT has headers and payload base64url encoded without padding +// https://tools.ietf.org/html/rfc7519#section-3 and +// https://tools.ietf.org/html/rfc7515#section-2 +func decodeJWT(data string) ([]byte, error) { + // https://tools.ietf.org/html/rfc7515#appendix-C + return base64.RawURLEncoding.DecodeString(data) +} + +// RefreshToken is the JSON representation of a MSAL refresh token for encoding to storage. +type RefreshToken struct { + HomeAccountID string `json:"home_account_id,omitempty"` + Environment string `json:"environment,omitempty"` + CredentialType string `json:"credential_type,omitempty"` + ClientID string `json:"client_id,omitempty"` + FamilyID string `json:"family_id,omitempty"` + Secret string `json:"secret,omitempty"` + Realm string `json:"realm,omitempty"` + Target string `json:"target,omitempty"` + UserAssertionHash string `json:"user_assertion_hash,omitempty"` + + AdditionalFields map[string]interface{} +} + +// NewRefreshToken is the constructor for RefreshToken. +func NewRefreshToken(homeID, env, clientID, refreshToken, familyID string) RefreshToken { + return RefreshToken{ + HomeAccountID: homeID, + Environment: env, + CredentialType: "RefreshToken", + ClientID: clientID, + FamilyID: familyID, + Secret: refreshToken, + } +} + +// Key outputs the key that can be used to uniquely look up this entry in a map. +func (rt RefreshToken) Key() string { + var fourth = rt.FamilyID + if fourth == "" { + fourth = rt.ClientID + } + + return strings.Join( + []string{rt.HomeAccountID, rt.Environment, rt.CredentialType, fourth}, + shared.CacheKeySeparator, + ) +} + +func (rt RefreshToken) GetSecret() string { + return rt.Secret +} + +// DeviceCodeResult stores the response from the STS device code endpoint. +type DeviceCodeResult struct { + // UserCode is the code the user needs to provide when authentication at the verification URI. + UserCode string + // DeviceCode is the code used in the access token request. + DeviceCode string + // VerificationURL is the the URL where user can authenticate. + VerificationURL string + // ExpiresOn is the expiration time of device code in seconds. 
+ ExpiresOn time.Time + // Interval is the interval at which the STS should be polled at. + Interval int + // Message is the message which should be displayed to the user. + Message string + // ClientID is the UUID issued by the authorization server for your application. + ClientID string + // Scopes is the OpenID scopes used to request access a protected API. + Scopes []string +} + +// NewDeviceCodeResult creates a DeviceCodeResult instance. +func NewDeviceCodeResult(userCode, deviceCode, verificationURL string, expiresOn time.Time, interval int, message, clientID string, scopes []string) DeviceCodeResult { + return DeviceCodeResult{userCode, deviceCode, verificationURL, expiresOn, interval, message, clientID, scopes} +} + +func (dcr DeviceCodeResult) String() string { + return fmt.Sprintf("UserCode: (%v)\nDeviceCode: (%v)\nURL: (%v)\nMessage: (%v)\n", dcr.UserCode, dcr.DeviceCode, dcr.VerificationURL, dcr.Message) + +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go new file mode 100644 index 00000000..7b2ccb4f --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go @@ -0,0 +1,552 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package authority + +import ( + "context" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "path" + "strings" + "time" + + "github.com/google/uuid" +) + +const ( + authorizationEndpoint = "https://%v/%v/oauth2/v2.0/authorize" + instanceDiscoveryEndpoint = "https://%v/common/discovery/instance" + tenantDiscoveryEndpointWithRegion = "https://%s.%s/%s/v2.0/.well-known/openid-configuration" + regionName = "REGION_NAME" + defaultAPIVersion = "2021-10-01" + imdsEndpoint = "http://169.254.169.254/metadata/instance/compute/location?format=text&api-version=" + defaultAPIVersion + autoDetectRegion = "TryAutoDetect" +) + +// These are various hosts that host AAD Instance discovery endpoints. +const ( + defaultHost = "login.microsoftonline.com" + loginMicrosoft = "login.microsoft.com" + loginWindows = "login.windows.net" + loginSTSWindows = "sts.windows.net" + loginMicrosoftOnline = defaultHost +) + +// jsonCaller is an interface that allows us to mock the JSONCall method. +type jsonCaller interface { + JSONCall(ctx context.Context, endpoint string, headers http.Header, qv url.Values, body, resp interface{}) error +} + +var aadTrustedHostList = map[string]bool{ + "login.windows.net": true, // Microsoft Azure Worldwide - Used in validation scenarios where host is not this list + "login.chinacloudapi.cn": true, // Microsoft Azure China + "login.microsoftonline.de": true, // Microsoft Azure Blackforest + "login-us.microsoftonline.com": true, // Microsoft Azure US Government - Legacy + "login.microsoftonline.us": true, // Microsoft Azure US Government + "login.microsoftonline.com": true, // Microsoft Azure Worldwide + "login.cloudgovapi.us": true, // Microsoft Azure US Government +} + +// TrustedHost checks if an AAD host is trusted/valid. +func TrustedHost(host string) bool { + if _, ok := aadTrustedHostList[host]; ok { + return true + } + return false +} + +// OAuthResponseBase is the base JSON return message for an OAuth call. +// This is embedded in other calls to get the base fields from every response. 
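Instance discovery is only sent directly to a host the library recognizes: TrustedHost above gates that check, and the discovery client later in this file falls back to login.microsoftonline.com for unknown hosts. A small standalone sketch of that host selection, with the endpoint constant and a subset of the trusted list copied from above:

package main

import "fmt"

// Mirrored from the authority package above.
const (
	defaultHost               = "login.microsoftonline.com"
	instanceDiscoveryEndpoint = "https://%v/common/discovery/instance"
)

// A subset of the trusted AAD host list above.
var trusted = map[string]bool{
	"login.windows.net":         true,
	"login.microsoftonline.us":  true,
	"login.microsoftonline.com": true,
}

// discoveryURL picks the host the instance-discovery call is sent to:
// the authority's own host if it is trusted, otherwise the public-cloud default.
func discoveryURL(authorityHost string) string {
	host := defaultHost
	if trusted[authorityHost] {
		host = authorityHost
	}
	return fmt.Sprintf(instanceDiscoveryEndpoint, host)
}

func main() {
	fmt.Println(discoveryURL("login.microsoftonline.us")) // trusted sovereign-cloud host is used directly
	fmt.Println(discoveryURL("sts.contoso.com"))          // unknown host falls back to the public endpoint
}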
+type OAuthResponseBase struct { + Error string `json:"error"` + SubError string `json:"suberror"` + ErrorDescription string `json:"error_description"` + ErrorCodes []int `json:"error_codes"` + CorrelationID string `json:"correlation_id"` + Claims string `json:"claims"` +} + +// TenantDiscoveryResponse is the tenant endpoints from the OpenID configuration endpoint. +type TenantDiscoveryResponse struct { + OAuthResponseBase + + AuthorizationEndpoint string `json:"authorization_endpoint"` + TokenEndpoint string `json:"token_endpoint"` + Issuer string `json:"issuer"` + + AdditionalFields map[string]interface{} +} + +// Validate validates that the response had the correct values required. +func (r *TenantDiscoveryResponse) Validate() error { + switch "" { + case r.AuthorizationEndpoint: + return errors.New("TenantDiscoveryResponse: authorize endpoint was not found in the openid configuration") + case r.TokenEndpoint: + return errors.New("TenantDiscoveryResponse: token endpoint was not found in the openid configuration") + case r.Issuer: + return errors.New("TenantDiscoveryResponse: issuer was not found in the openid configuration") + } + return nil +} + +type InstanceDiscoveryMetadata struct { + PreferredNetwork string `json:"preferred_network"` + PreferredCache string `json:"preferred_cache"` + Aliases []string `json:"aliases"` + + AdditionalFields map[string]interface{} +} + +type InstanceDiscoveryResponse struct { + TenantDiscoveryEndpoint string `json:"tenant_discovery_endpoint"` + Metadata []InstanceDiscoveryMetadata `json:"metadata"` + + AdditionalFields map[string]interface{} +} + +//go:generate stringer -type=AuthorizeType + +// AuthorizeType represents the type of token flow. +type AuthorizeType int + +// These are all the types of token flows. +const ( + ATUnknown AuthorizeType = iota + ATUsernamePassword + ATWindowsIntegrated + ATAuthCode + ATInteractive + ATClientCredentials + ATDeviceCode + ATRefreshToken + AccountByID + ATOnBehalfOf +) + +// These are all authority types +const ( + AAD = "MSSTS" + ADFS = "ADFS" +) + +// AuthParams represents the parameters used for authorization for token acquisition. +type AuthParams struct { + AuthorityInfo Info + CorrelationID string + Endpoints Endpoints + ClientID string + // Redirecturi is used for auth flows that specify a redirect URI (e.g. local server for interactive auth flow). + Redirecturi string + HomeAccountID string + // Username is the user-name portion for username/password auth flow. + Username string + // Password is the password portion for username/password auth flow. + Password string + // Scopes is the list of scopes the user consents to. + Scopes []string + // AuthorizationType specifies the auth flow being used. + AuthorizationType AuthorizeType + // State is a random value used to prevent cross-site request forgery attacks. + State string + // CodeChallenge is derived from a code verifier and is sent in the auth request. + CodeChallenge string + // CodeChallengeMethod describes the method used to create the CodeChallenge. + CodeChallengeMethod string + // Prompt specifies the user prompt type during interactive auth. + Prompt string + // IsConfidentialClient specifies if it is a confidential client. + IsConfidentialClient bool + // SendX5C specifies if x5c claim(public key of the certificate) should be sent to STS. + SendX5C bool + // UserAssertion is the access token used to acquire token on behalf of user + UserAssertion string + // Capabilities the client will include with each token request, for example "CP1". 
+ // Call [NewClientCapabilities] to construct a value for this field. + Capabilities ClientCapabilities + // Claims required for an access token to satisfy a conditional access policy + Claims string + // KnownAuthorityHosts don't require metadata discovery because they're known to the user + KnownAuthorityHosts []string + // LoginHint is a username with which to pre-populate account selection during interactive auth + LoginHint string + // DomainHint is a directive that can be used to accelerate the user to their federated IdP sign-in page + DomainHint string +} + +// NewAuthParams creates an authorization parameters object. +func NewAuthParams(clientID string, authorityInfo Info) AuthParams { + return AuthParams{ + ClientID: clientID, + AuthorityInfo: authorityInfo, + CorrelationID: uuid.New().String(), + } +} + +// WithTenant returns a copy of the AuthParams having the specified tenant ID. If the given +// ID is empty, the copy is identical to the original. This function returns an error in +// several cases: +// - ID isn't specific (for example, it's "common") +// - ID is non-empty and the authority doesn't support tenants (for example, it's an ADFS authority) +// - the client is configured to authenticate only Microsoft accounts via the "consumers" endpoint +// - the resulting authority URL is invalid +func (p AuthParams) WithTenant(ID string) (AuthParams, error) { + switch ID { + case "", p.AuthorityInfo.Tenant: + // keep the default tenant because the caller didn't override it + return p, nil + case "common", "consumers", "organizations": + if p.AuthorityInfo.AuthorityType == AAD { + return p, fmt.Errorf(`tenant ID must be a specific tenant, not "%s"`, ID) + } + // else we'll return a better error below + } + if p.AuthorityInfo.AuthorityType != AAD { + return p, errors.New("the authority doesn't support tenants") + } + if p.AuthorityInfo.Tenant == "consumers" { + return p, errors.New(`client is configured to authenticate only personal Microsoft accounts, via the "consumers" endpoint`) + } + authority := "https://" + path.Join(p.AuthorityInfo.Host, ID) + info, err := NewInfoFromAuthorityURI(authority, p.AuthorityInfo.ValidateAuthority, p.AuthorityInfo.InstanceDiscoveryDisabled) + if err == nil { + info.Region = p.AuthorityInfo.Region + p.AuthorityInfo = info + } + return p, err +} + +// MergeCapabilitiesAndClaims combines client capabilities and challenge claims into a value suitable for an authentication request's "claims" parameter. +func (p AuthParams) MergeCapabilitiesAndClaims() (string, error) { + claims := p.Claims + if len(p.Capabilities.asMap) > 0 { + if claims == "" { + // without claims the result is simply the capabilities + return p.Capabilities.asJSON, nil + } + // Otherwise, merge claims and capabilties into a single JSON object. + // We handle the claims challenge as a map because we don't know its structure. + var challenge map[string]any + if err := json.Unmarshal([]byte(claims), &challenge); err != nil { + return "", fmt.Errorf(`claims must be JSON. Are they base64 encoded? json.Unmarshal returned "%v"`, err) + } + if err := merge(p.Capabilities.asMap, challenge); err != nil { + return "", err + } + b, err := json.Marshal(challenge) + if err != nil { + return "", err + } + claims = string(b) + } + return claims, nil +} + +// merges a into b without overwriting b's values. Returns an error when a and b share a key for which either has a non-object value. 
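MergeCapabilitiesAndClaims above folds the client's static capabilities JSON into a claims challenge without overwriting anything the challenge already carries; the merge helper defined next does the recursive work. A simplified standalone illustration of the combined claims value (the challenge JSON is hypothetical, and this merge is intentionally simpler than the vendored one):

package main

import (
	"encoding/json"
	"fmt"
)

// mergeInto copies keys of src into dst when dst doesn't already have them,
// recursing into nested objects. It is a simplified stand-in for merge below.
func mergeInto(src, dst map[string]any) {
	for k, sv := range src {
		dv, ok := dst[k]
		if !ok {
			dst[k] = sv
			continue
		}
		sm, sok := sv.(map[string]any)
		dm, dok := dv.(map[string]any)
		if sok && dok {
			mergeInto(sm, dm)
		}
	}
}

func main() {
	// Capabilities as produced by NewClientCapabilities([]string{"CP1"}).
	capabilities := []byte(`{"access_token":{"xms_cc":{"values":["CP1"]}}}`)
	// A hypothetical claims challenge returned by a resource.
	challenge := []byte(`{"access_token":{"nbf":{"essential":true}}}`)

	var capMap, chMap map[string]any
	_ = json.Unmarshal(capabilities, &capMap)
	_ = json.Unmarshal(challenge, &chMap)

	mergeInto(capMap, chMap)
	merged, _ := json.Marshal(chMap)
	fmt.Println(string(merged)) // {"access_token":{"nbf":{"essential":true},"xms_cc":{"values":["CP1"]}}}
}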
+func merge(a, b map[string]any) error { + for k, av := range a { + if bv, ok := b[k]; !ok { + // b doesn't contain this key => simply set it to a's value + b[k] = av + } else { + // b does contain this key => recursively merge a[k] into b[k], provided both are maps. If a[k] or b[k] isn't + // a map, return an error because merging would overwrite some value in b. Errors shouldn't occur in practice + // because the challenge will be from AAD, which knows the capabilities format. + if A, ok := av.(map[string]any); ok { + if B, ok := bv.(map[string]any); ok { + return merge(A, B) + } else { + // b[k] isn't a map + return errors.New("challenge claims conflict with client capabilities") + } + } else { + // a[k] isn't a map + return errors.New("challenge claims conflict with client capabilities") + } + } + } + return nil +} + +// ClientCapabilities stores capabilities in the formats used by AuthParams.MergeCapabilitiesAndClaims. +// [NewClientCapabilities] precomputes these representations because capabilities are static for the +// lifetime of a client and are included with every authentication request i.e., these computations +// always have the same result and would otherwise have to be repeated for every request. +type ClientCapabilities struct { + // asJSON is for the common case: adding the capabilities to an auth request with no challenge claims + asJSON string + // asMap is for merging the capabilities with challenge claims + asMap map[string]any +} + +func NewClientCapabilities(capabilities []string) (ClientCapabilities, error) { + c := ClientCapabilities{} + var err error + if len(capabilities) > 0 { + cpbs := make([]string, len(capabilities)) + for i := 0; i < len(cpbs); i++ { + cpbs[i] = fmt.Sprintf(`"%s"`, capabilities[i]) + } + c.asJSON = fmt.Sprintf(`{"access_token":{"xms_cc":{"values":[%s]}}}`, strings.Join(cpbs, ",")) + // note our JSON is valid but we can't stop users breaking it with garbage like "}" + err = json.Unmarshal([]byte(c.asJSON), &c.asMap) + } + return c, err +} + +// Info consists of information about the authority. +type Info struct { + Host string + CanonicalAuthorityURI string + AuthorityType string + UserRealmURIPrefix string + ValidateAuthority bool + Tenant string + Region string + InstanceDiscoveryDisabled bool +} + +func firstPathSegment(u *url.URL) (string, error) { + pathParts := strings.Split(u.EscapedPath(), "/") + if len(pathParts) >= 2 { + return pathParts[1], nil + } + + return "", errors.New(`authority must be an https URL such as "https://login.microsoftonline.com/"`) +} + +// NewInfoFromAuthorityURI creates an AuthorityInfo instance from the authority URL provided. 
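NewInfoFromAuthorityURI, defined next, lower-cases the authority URL, requires https, takes the first path segment as the tenant, and treats an "adfs" segment as an ADFS authority. A quick standalone sketch of that derivation for an example authority:

package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	authority := "https://login.microsoftonline.com/Contoso.onmicrosoft.com"

	u, err := url.Parse(strings.ToLower(authority))
	if err != nil || u.Scheme != "https" {
		panic(`authority must be an https URL such as "https://login.microsoftonline.com/<tenant>"`)
	}
	segments := strings.Split(u.EscapedPath(), "/")
	if len(segments) < 2 || segments[1] == "" {
		panic("authority URL must include a tenant segment")
	}
	tenant := segments[1]

	authorityType := "MSSTS" // AAD
	if tenant == "adfs" {
		authorityType = "ADFS"
	}

	fmt.Println("host:", u.Host)                                             // login.microsoftonline.com
	fmt.Println("tenant:", tenant)                                           // contoso.onmicrosoft.com
	fmt.Println("canonical:", fmt.Sprintf("https://%v/%v/", u.Host, tenant)) // https://login.microsoftonline.com/contoso.onmicrosoft.com/
	fmt.Println("type:", authorityType)                                      // MSSTS
}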
+func NewInfoFromAuthorityURI(authority string, validateAuthority bool, instanceDiscoveryDisabled bool) (Info, error) { + u, err := url.Parse(strings.ToLower(authority)) + if err != nil || u.Scheme != "https" { + return Info{}, errors.New(`authority must be an https URL such as "https://login.microsoftonline.com/"`) + } + + tenant, err := firstPathSegment(u) + if err != nil { + return Info{}, err + } + authorityType := AAD + if tenant == "adfs" { + authorityType = ADFS + } + + // u.Host includes the port, if any, which is required for private cloud deployments + return Info{ + Host: u.Host, + CanonicalAuthorityURI: fmt.Sprintf("https://%v/%v/", u.Host, tenant), + AuthorityType: authorityType, + UserRealmURIPrefix: fmt.Sprintf("https://%v/common/userrealm/", u.Hostname()), + ValidateAuthority: validateAuthority, + Tenant: tenant, + InstanceDiscoveryDisabled: instanceDiscoveryDisabled, + }, nil +} + +// Endpoints consists of the endpoints from the tenant discovery response. +type Endpoints struct { + AuthorizationEndpoint string + TokenEndpoint string + selfSignedJwtAudience string + authorityHost string +} + +// NewEndpoints creates an Endpoints object. +func NewEndpoints(authorizationEndpoint string, tokenEndpoint string, selfSignedJwtAudience string, authorityHost string) Endpoints { + return Endpoints{authorizationEndpoint, tokenEndpoint, selfSignedJwtAudience, authorityHost} +} + +// UserRealmAccountType refers to the type of user realm. +type UserRealmAccountType string + +// These are the different types of user realms. +const ( + Unknown UserRealmAccountType = "" + Federated UserRealmAccountType = "Federated" + Managed UserRealmAccountType = "Managed" +) + +// UserRealm is used for the username password request to determine user type +type UserRealm struct { + AccountType UserRealmAccountType `json:"account_type"` + DomainName string `json:"domain_name"` + CloudInstanceName string `json:"cloud_instance_name"` + CloudAudienceURN string `json:"cloud_audience_urn"` + + // required if accountType is Federated + FederationProtocol string `json:"federation_protocol"` + FederationMetadataURL string `json:"federation_metadata_url"` + + AdditionalFields map[string]interface{} +} + +func (u UserRealm) validate() error { + switch "" { + case string(u.AccountType): + return errors.New("the account type (Federated or Managed) is missing") + case u.DomainName: + return errors.New("domain name of user realm is missing") + case u.CloudInstanceName: + return errors.New("cloud instance name of user realm is missing") + case u.CloudAudienceURN: + return errors.New("cloud Instance URN is missing") + } + + if u.AccountType == Federated { + switch "" { + case u.FederationProtocol: + return errors.New("federation protocol of user realm is missing") + case u.FederationMetadataURL: + return errors.New("federation metadata URL of user realm is missing") + } + } + return nil +} + +// Client represents the REST calls to authority backends. +type Client struct { + // Comm provides the HTTP transport client. 
+ Comm jsonCaller // *comm.Client +} + +func (c Client) UserRealm(ctx context.Context, authParams AuthParams) (UserRealm, error) { + endpoint := fmt.Sprintf("https://%s/common/UserRealm/%s", authParams.Endpoints.authorityHost, url.PathEscape(authParams.Username)) + qv := url.Values{ + "api-version": []string{"1.0"}, + } + + resp := UserRealm{} + err := c.Comm.JSONCall( + ctx, + endpoint, + http.Header{"client-request-id": []string{authParams.CorrelationID}}, + qv, + nil, + &resp, + ) + if err != nil { + return resp, err + } + + return resp, resp.validate() +} + +func (c Client) GetTenantDiscoveryResponse(ctx context.Context, openIDConfigurationEndpoint string) (TenantDiscoveryResponse, error) { + resp := TenantDiscoveryResponse{} + err := c.Comm.JSONCall( + ctx, + openIDConfigurationEndpoint, + http.Header{}, + nil, + nil, + &resp, + ) + + return resp, err +} + +// AADInstanceDiscovery attempts to discover a tenant endpoint (used in OIDC auth with an authorization endpoint). +// This is done by AAD which allows for aliasing of tenants (windows.sts.net is the same as login.windows.com). +func (c Client) AADInstanceDiscovery(ctx context.Context, authorityInfo Info) (InstanceDiscoveryResponse, error) { + region := "" + var err error + resp := InstanceDiscoveryResponse{} + if authorityInfo.Region != "" && authorityInfo.Region != autoDetectRegion { + region = authorityInfo.Region + } else if authorityInfo.Region == autoDetectRegion { + region = detectRegion(ctx) + } + if region != "" { + environment := authorityInfo.Host + switch environment { + case loginMicrosoft, loginWindows, loginSTSWindows, defaultHost: + environment = loginMicrosoft + } + + resp.TenantDiscoveryEndpoint = fmt.Sprintf(tenantDiscoveryEndpointWithRegion, region, environment, authorityInfo.Tenant) + metadata := InstanceDiscoveryMetadata{ + PreferredNetwork: fmt.Sprintf("%v.%v", region, authorityInfo.Host), + PreferredCache: authorityInfo.Host, + Aliases: []string{fmt.Sprintf("%v.%v", region, authorityInfo.Host), authorityInfo.Host}, + } + resp.Metadata = []InstanceDiscoveryMetadata{metadata} + } else { + qv := url.Values{} + qv.Set("api-version", "1.1") + qv.Set("authorization_endpoint", fmt.Sprintf(authorizationEndpoint, authorityInfo.Host, authorityInfo.Tenant)) + + discoveryHost := defaultHost + if TrustedHost(authorityInfo.Host) { + discoveryHost = authorityInfo.Host + } + + endpoint := fmt.Sprintf(instanceDiscoveryEndpoint, discoveryHost) + err = c.Comm.JSONCall(ctx, endpoint, http.Header{}, qv, nil, &resp) + } + return resp, err +} + +func detectRegion(ctx context.Context) string { + region := os.Getenv(regionName) + if region != "" { + region = strings.ReplaceAll(region, " ", "") + return strings.ToLower(region) + } + // HTTP call to IMDS endpoint to get region + // Refer : https://identitydivision.visualstudio.com/DevEx/_git/AuthLibrariesApiReview?path=%2FPinAuthToRegion%2FAAD%20SDK%20Proposal%20to%20Pin%20Auth%20to%20region.md&_a=preview&version=GBdev + // Set a 2 second timeout for this http client which only does calls to IMDS endpoint + client := http.Client{ + Timeout: time.Duration(2 * time.Second), + } + req, _ := http.NewRequest("GET", imdsEndpoint, nil) + req.Header.Set("Metadata", "true") + resp, err := client.Do(req) + // If the request times out or there is an error, it is retried once + if err != nil || resp.StatusCode != 200 { + resp, err = client.Do(req) + if err != nil || resp.StatusCode != 200 { + return "" + } + } + defer resp.Body.Close() + response, err := io.ReadAll(resp.Body) + if err != nil { + 
return "" + } + return string(response) +} + +func (a *AuthParams) CacheKey(isAppCache bool) string { + if a.AuthorizationType == ATOnBehalfOf { + return a.AssertionHash() + } + if a.AuthorizationType == ATClientCredentials || isAppCache { + return a.AppKey() + } + if a.AuthorizationType == ATRefreshToken || a.AuthorizationType == AccountByID { + return a.HomeAccountID + } + return "" +} +func (a *AuthParams) AssertionHash() string { + hasher := sha256.New() + // Per documentation this never returns an error : https://pkg.go.dev/hash#pkg-types + _, _ = hasher.Write([]byte(a.UserAssertion)) + sha := base64.URLEncoding.EncodeToString(hasher.Sum(nil)) + return sha +} + +func (a *AuthParams) AppKey() string { + if a.AuthorityInfo.Tenant != "" { + return fmt.Sprintf("%s_%s_AppTokenCache", a.ClientID, a.AuthorityInfo.Tenant) + } + return fmt.Sprintf("%s__AppTokenCache", a.ClientID) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authorizetype_string.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authorizetype_string.go new file mode 100644 index 00000000..10039773 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authorizetype_string.go @@ -0,0 +1,30 @@ +// Code generated by "stringer -type=AuthorizeType"; DO NOT EDIT. + +package authority + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[ATUnknown-0] + _ = x[ATUsernamePassword-1] + _ = x[ATWindowsIntegrated-2] + _ = x[ATAuthCode-3] + _ = x[ATInteractive-4] + _ = x[ATClientCredentials-5] + _ = x[ATDeviceCode-6] + _ = x[ATRefreshToken-7] +} + +const _AuthorizeType_name = "ATUnknownATUsernamePasswordATWindowsIntegratedATAuthCodeATInteractiveATClientCredentialsATDeviceCodeATRefreshToken" + +var _AuthorizeType_index = [...]uint8{0, 9, 27, 46, 56, 69, 88, 100, 114} + +func (i AuthorizeType) String() string { + if i < 0 || i >= AuthorizeType(len(_AuthorizeType_index)-1) { + return "AuthorizeType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _AuthorizeType_name[_AuthorizeType_index[i]:_AuthorizeType_index[i+1]] +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go new file mode 100644 index 00000000..7d9ec7cd --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go @@ -0,0 +1,320 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package comm provides helpers for communicating with HTTP backends. +package comm + +import ( + "bytes" + "context" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "net/http" + "net/url" + "reflect" + "runtime" + "strings" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" + customJSON "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version" + "github.com/google/uuid" +) + +// HTTPClient represents an HTTP client. +// It's usually an *http.Client from the standard library. 
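CacheKey above partitions the token cache: client-credential tokens are keyed by AppKey ("<clientID>_<tenant>_AppTokenCache") and on-behalf-of entries by AssertionHash, a URL-safe base64 SHA-256 of the user assertion. A standalone sketch of both derivations with made-up inputs:

package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
)

func appKey(clientID, tenant string) string {
	if tenant != "" {
		return fmt.Sprintf("%s_%s_AppTokenCache", clientID, tenant)
	}
	return fmt.Sprintf("%s__AppTokenCache", clientID)
}

func assertionHash(userAssertion string) string {
	sum := sha256.Sum256([]byte(userAssertion))
	return base64.URLEncoding.EncodeToString(sum[:])
}

func main() {
	fmt.Println(appKey("00000000-0000-0000-0000-000000000000", "contoso.onmicrosoft.com"))
	fmt.Println(assertionHash("eyJhbGciOi...")) // the incoming user assertion (JWT) in an on-behalf-of flow
}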
+type HTTPClient interface { + // Do sends an HTTP request and returns an HTTP response. + Do(req *http.Request) (*http.Response, error) + + // CloseIdleConnections closes any idle connections in a "keep-alive" state. + CloseIdleConnections() +} + +// Client provides a wrapper to our *http.Client that handles compression and serialization needs. +type Client struct { + client HTTPClient +} + +// New returns a new Client object. +func New(httpClient HTTPClient) *Client { + if httpClient == nil { + panic("http.Client cannot == nil") + } + + return &Client{client: httpClient} +} + +// JSONCall connects to the REST endpoint passing the HTTP query values, headers and JSON conversion +// of body in the HTTP body. It automatically handles compression and decompression with gzip. The response is JSON +// unmarshalled into resp. resp must be a pointer to a struct. If the body struct contains a field called +// "AdditionalFields" we use a custom marshal/unmarshal engine. +func (c *Client) JSONCall(ctx context.Context, endpoint string, headers http.Header, qv url.Values, body, resp interface{}) error { + if qv == nil { + qv = url.Values{} + } + + v := reflect.ValueOf(resp) + if err := c.checkResp(v); err != nil { + return err + } + + // Choose a JSON marshal/unmarshal depending on if we have AdditionalFields attribute. + var marshal = json.Marshal + var unmarshal = json.Unmarshal + if _, ok := v.Elem().Type().FieldByName("AdditionalFields"); ok { + marshal = customJSON.Marshal + unmarshal = customJSON.Unmarshal + } + + u, err := url.Parse(endpoint) + if err != nil { + return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err) + } + u.RawQuery = qv.Encode() + + addStdHeaders(headers) + + req := &http.Request{Method: http.MethodGet, URL: u, Header: headers} + + if body != nil { + // Note: In case your wondering why we are not gzip encoding.... + // I'm not sure if these various services support gzip on send. + headers.Add("Content-Type", "application/json; charset=utf-8") + data, err := marshal(body) + if err != nil { + return fmt.Errorf("bug: conn.Call(): could not marshal the body object: %w", err) + } + req.Body = io.NopCloser(bytes.NewBuffer(data)) + req.Method = http.MethodPost + } + + data, err := c.do(ctx, req) + if err != nil { + return err + } + + if resp != nil { + if err := unmarshal(data, resp); err != nil { + return fmt.Errorf("json decode error: %w\njson message bytes were: %s", err, string(data)) + } + } + return nil +} + +// XMLCall connects to an endpoint and decodes the XML response into resp. This is used when +// sending application/xml . If sending XML via SOAP, use SOAPCall(). +func (c *Client) XMLCall(ctx context.Context, endpoint string, headers http.Header, qv url.Values, resp interface{}) error { + if err := c.checkResp(reflect.ValueOf(resp)); err != nil { + return err + } + + if qv == nil { + qv = url.Values{} + } + + u, err := url.Parse(endpoint) + if err != nil { + return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err) + } + u.RawQuery = qv.Encode() + + headers.Set("Content-Type", "application/xml; charset=utf-8") // This was not set in he original Mex(), but... + addStdHeaders(headers) + + return c.xmlCall(ctx, u, headers, "", resp) +} + +// SOAPCall returns the SOAP message given an endpoint, action, body of the request and the response object to marshal into. 
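JSONCall above switches to the library's custom JSON engine whenever the response struct declares an AdditionalFields member, so unrecognized response fields are preserved rather than dropped. A standalone sketch of that reflection check, with encoding/json standing in for the internal engine:

package main

import (
	"encoding/json"
	"fmt"
	"reflect"
)

type tenantResponse struct {
	TokenEndpoint string `json:"token_endpoint"`

	// The presence of this field is what JSONCall keys on.
	AdditionalFields map[string]interface{}
}

// pickUnmarshal reports whether the custom engine would be selected for resp.
func pickUnmarshal(resp interface{}) (func([]byte, interface{}) error, bool) {
	v := reflect.ValueOf(resp)
	if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct {
		return nil, false // JSONCall requires a *struct, as checkResp enforces
	}
	if _, ok := v.Elem().Type().FieldByName("AdditionalFields"); ok {
		// The real client swaps in its internal/json engine here; encoding/json stands in for the sketch.
		return json.Unmarshal, true
	}
	return json.Unmarshal, false
}

func main() {
	var resp tenantResponse
	unmarshal, custom := pickUnmarshal(&resp)
	_ = unmarshal([]byte(`{"token_endpoint":"https://login.microsoftonline.com/common/oauth2/v2.0/token"}`), &resp)
	fmt.Println(resp.TokenEndpoint, "custom engine selected:", custom)
}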
+func (c *Client) SOAPCall(ctx context.Context, endpoint, action string, headers http.Header, qv url.Values, body string, resp interface{}) error { + if body == "" { + return fmt.Errorf("cannot make a SOAP call with body set to empty string") + } + + if err := c.checkResp(reflect.ValueOf(resp)); err != nil { + return err + } + + if qv == nil { + qv = url.Values{} + } + + u, err := url.Parse(endpoint) + if err != nil { + return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err) + } + u.RawQuery = qv.Encode() + + headers.Set("Content-Type", "application/soap+xml; charset=utf-8") + headers.Set("SOAPAction", action) + addStdHeaders(headers) + + return c.xmlCall(ctx, u, headers, body, resp) +} + +// xmlCall sends an XML in body and decodes into resp. This simply does the transport and relies on +// an upper level call to set things such as SOAP parameters and Content-Type, if required. +func (c *Client) xmlCall(ctx context.Context, u *url.URL, headers http.Header, body string, resp interface{}) error { + req := &http.Request{Method: http.MethodGet, URL: u, Header: headers} + + if len(body) > 0 { + req.Method = http.MethodPost + req.Body = io.NopCloser(strings.NewReader(body)) + } + + data, err := c.do(ctx, req) + if err != nil { + return err + } + + return xml.Unmarshal(data, resp) +} + +// URLFormCall is used to make a call where we need to send application/x-www-form-urlencoded data +// to the backend and receive JSON back. qv will be encoded into the request body. +func (c *Client) URLFormCall(ctx context.Context, endpoint string, qv url.Values, resp interface{}) error { + if len(qv) == 0 { + return fmt.Errorf("URLFormCall() requires qv to have non-zero length") + } + + if err := c.checkResp(reflect.ValueOf(resp)); err != nil { + return err + } + + u, err := url.Parse(endpoint) + if err != nil { + return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err) + } + + headers := http.Header{} + headers.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + addStdHeaders(headers) + + enc := qv.Encode() + + req := &http.Request{ + Method: http.MethodPost, + URL: u, + Header: headers, + ContentLength: int64(len(enc)), + Body: io.NopCloser(strings.NewReader(enc)), + GetBody: func() (io.ReadCloser, error) { + return io.NopCloser(strings.NewReader(enc)), nil + }, + } + + data, err := c.do(ctx, req) + if err != nil { + return err + } + + v := reflect.ValueOf(resp) + if err := c.checkResp(v); err != nil { + return err + } + + var unmarshal = json.Unmarshal + if _, ok := v.Elem().Type().FieldByName("AdditionalFields"); ok { + unmarshal = customJSON.Unmarshal + } + if resp != nil { + if err := unmarshal(data, resp); err != nil { + return fmt.Errorf("json decode error: %w\nraw message was: %s", err, string(data)) + } + } + return nil +} + +// do makes the HTTP call to the server and returns the contents of the body. 
+func (c *Client) do(ctx context.Context, req *http.Request) ([]byte, error) { + if _, ok := ctx.Deadline(); !ok { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, 30*time.Second) + defer cancel() + } + req = req.WithContext(ctx) + + reply, err := c.client.Do(req) + if err != nil { + return nil, fmt.Errorf("server response error:\n %w", err) + } + defer reply.Body.Close() + + data, err := c.readBody(reply) + if err != nil { + return nil, fmt.Errorf("could not read the body of an HTTP Response: %w", err) + } + reply.Body = io.NopCloser(bytes.NewBuffer(data)) + + // NOTE: This doesn't happen immediately after the call so that we can get an error message + // from the server and include it in our error. + switch reply.StatusCode { + case 200, 201: + default: + sd := strings.TrimSpace(string(data)) + if sd != "" { + // We probably have the error in the body. + return nil, errors.CallErr{ + Req: req, + Resp: reply, + Err: fmt.Errorf("http call(%s)(%s) error: reply status code was %d:\n%s", req.URL.String(), req.Method, reply.StatusCode, sd), + } + } + return nil, errors.CallErr{ + Req: req, + Resp: reply, + Err: fmt.Errorf("http call(%s)(%s) error: reply status code was %d", req.URL.String(), req.Method, reply.StatusCode), + } + } + + return data, nil +} + +// checkResp checks a response object o make sure it is a pointer to a struct. +func (c *Client) checkResp(v reflect.Value) error { + if v.Kind() != reflect.Ptr { + return fmt.Errorf("bug: resp argument must a *struct, was %T", v.Interface()) + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return fmt.Errorf("bug: resp argument must be a *struct, was %T", v.Interface()) + } + return nil +} + +// readBody reads the body out of an *http.Response. It supports gzip encoded responses. +func (c *Client) readBody(resp *http.Response) ([]byte, error) { + var reader io.Reader = resp.Body + switch resp.Header.Get("Content-Encoding") { + case "": + // Do nothing + case "gzip": + reader = gzipDecompress(resp.Body) + default: + return nil, fmt.Errorf("bug: comm.Client.JSONCall(): content was send with unsupported content-encoding %s", resp.Header.Get("Content-Encoding")) + } + return io.ReadAll(reader) +} + +var testID string + +// addStdHeaders adds the standard headers we use on all calls. +func addStdHeaders(headers http.Header) http.Header { + headers.Set("Accept-Encoding", "gzip") + // So that I can have a static id for tests. + if testID != "" { + headers.Set("client-request-id", testID) + headers.Set("Return-Client-Request-Id", "false") + } else { + headers.Set("client-request-id", uuid.New().String()) + headers.Set("Return-Client-Request-Id", "false") + } + headers.Set("x-client-sku", "MSAL.Go") + headers.Set("x-client-os", runtime.GOOS) + headers.Set("x-client-cpu", runtime.GOARCH) + headers.Set("x-client-ver", version.Version) + return headers +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/compress.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/compress.go new file mode 100644 index 00000000..4d3dbfcf --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/compress.go @@ -0,0 +1,33 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
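// Illustrative usage sketch (not part of the vendored upstream file): a typical
// round trip through Client.JSONCall defined in comm.go above. The endpoint,
// query value and response shape are placeholders. JSONCall sends a GET when
// body is nil, adds the standard MSAL headers, and transparently decompresses a
// gzip response before unmarshalling the JSON into the pointer-to-struct resp
// argument. Assumes the imports already present in comm.go (context, net/http,
// net/url).
func jsonCallSketch(ctx context.Context, c *Client) (string, error) {
	qv := url.Values{}
	qv.Set("api-version", "1.1") // placeholder query value

	var out struct {
		TenantDiscoveryEndpoint string `json:"tenant_discovery_endpoint"`
	}
	err := c.JSONCall(ctx, "https://login.microsoftonline.com/common/discovery/instance", http.Header{}, qv, nil, &out)
	if err != nil {
		return "", err
	}
	return out.TenantDiscoveryEndpoint, nil
}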
+ +package comm + +import ( + "compress/gzip" + "io" +) + +func gzipDecompress(r io.Reader) io.Reader { + gzipReader, _ := gzip.NewReader(r) + + pipeOut, pipeIn := io.Pipe() + go func() { + // decompression bomb would have to come from Azure services. + // If we want to limit, we should do that in comm.do(). + _, err := io.Copy(pipeIn, gzipReader) //nolint + if err != nil { + // don't need the error. + pipeIn.CloseWithError(err) //nolint + gzipReader.Close() + return + } + if err := gzipReader.Close(); err != nil { + // don't need the error. + pipeIn.CloseWithError(err) //nolint + return + } + pipeIn.Close() + }() + return pipeOut +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant/grant.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant/grant.go new file mode 100644 index 00000000..b628f61a --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant/grant.go @@ -0,0 +1,17 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package grant holds types of grants issued by authorization services. +package grant + +const ( + Password = "password" + JWT = "urn:ietf:params:oauth:grant-type:jwt-bearer" + SAMLV1 = "urn:ietf:params:oauth:grant-type:saml1_1-bearer" + SAMLV2 = "urn:ietf:params:oauth:grant-type:saml2-bearer" + DeviceCode = "device_code" + AuthCode = "authorization_code" + RefreshToken = "refresh_token" + ClientCredential = "client_credentials" + ClientAssertion = "urn:ietf:params:oauth:client-assertion-type:jwt-bearer" +) diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/ops.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/ops.go new file mode 100644 index 00000000..1f9c543f --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/ops.go @@ -0,0 +1,56 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/* +Package ops provides operations to various backend services using REST clients. + +The REST type provides several clients that can be used to communicate to backends. +Usage is simple: + + rest := ops.New() + + // Creates an authority client and calls the UserRealm() method. + userRealm, err := rest.Authority().UserRealm(ctx, authParameters) + if err != nil { + // Do something + } +*/ +package ops + +import ( + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust" +) + +// HTTPClient represents an HTTP client. +// It's usually an *http.Client from the standard library. +type HTTPClient = comm.HTTPClient + +// REST provides REST clients for communicating with various backends used by MSAL. +type REST struct { + client *comm.Client +} + +// New is the constructor for REST. +func New(httpClient HTTPClient) *REST { + return &REST{client: comm.New(httpClient)} +} + +// Authority returns a client for querying information about various authorities. 
+func (r *REST) Authority() authority.Client { + return authority.Client{Comm: r.client} +} + +// AccessTokens returns a client that can be used to get various access tokens for +// authorization purposes. +func (r *REST) AccessTokens() accesstokens.Client { + return accesstokens.Client{Comm: r.client} +} + +// WSTrust provides access to various metadata in a WSTrust service. This data can +// be used to gain tokens based on SAML data using the client provided by AccessTokens(). +func (r *REST) WSTrust() wstrust.Client { + return wstrust.Client{Comm: r.client} +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/endpointtype_string.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/endpointtype_string.go new file mode 100644 index 00000000..a2bb6278 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/endpointtype_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type=endpointType"; DO NOT EDIT. + +package defs + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[etUnknown-0] + _ = x[etUsernamePassword-1] + _ = x[etWindowsTransport-2] +} + +const _endpointType_name = "etUnknownetUsernamePasswordetWindowsTransport" + +var _endpointType_index = [...]uint8{0, 9, 27, 45} + +func (i endpointType) String() string { + if i < 0 || i >= endpointType(len(_endpointType_index)-1) { + return "endpointType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _endpointType_name[_endpointType_index[i]:_endpointType_index[i+1]] +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/mex_document_definitions.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/mex_document_definitions.go new file mode 100644 index 00000000..64972700 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/mex_document_definitions.go @@ -0,0 +1,394 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +package defs + +import "encoding/xml" + +type Definitions struct { + XMLName xml.Name `xml:"definitions"` + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + TargetNamespace string `xml:"targetNamespace,attr"` + WSDL string `xml:"wsdl,attr"` + XSD string `xml:"xsd,attr"` + T string `xml:"t,attr"` + SOAPENC string `xml:"soapenc,attr"` + SOAP string `xml:"soap,attr"` + TNS string `xml:"tns,attr"` + MSC string `xml:"msc,attr"` + WSAM string `xml:"wsam,attr"` + SOAP12 string `xml:"soap12,attr"` + WSA10 string `xml:"wsa10,attr"` + WSA string `xml:"wsa,attr"` + WSAW string `xml:"wsaw,attr"` + WSX string `xml:"wsx,attr"` + WSAP string `xml:"wsap,attr"` + WSU string `xml:"wsu,attr"` + Trust string `xml:"trust,attr"` + WSP string `xml:"wsp,attr"` + Policy []Policy `xml:"Policy"` + Types Types `xml:"types"` + Message []Message `xml:"message"` + PortType []PortType `xml:"portType"` + Binding []Binding `xml:"binding"` + Service Service `xml:"service"` +} + +type Policy struct { + Text string `xml:",chardata"` + ID string `xml:"Id,attr"` + ExactlyOne ExactlyOne `xml:"ExactlyOne"` +} + +type ExactlyOne struct { + Text string `xml:",chardata"` + All All `xml:"All"` +} + +type All struct { + Text string `xml:",chardata"` + NegotiateAuthentication NegotiateAuthentication `xml:"NegotiateAuthentication"` + TransportBinding TransportBinding `xml:"TransportBinding"` + UsingAddressing Text `xml:"UsingAddressing"` + EndorsingSupportingTokens EndorsingSupportingTokens `xml:"EndorsingSupportingTokens"` + WSS11 WSS11 `xml:"Wss11"` + Trust10 Trust10 `xml:"Trust10"` + SignedSupportingTokens SignedSupportingTokens `xml:"SignedSupportingTokens"` + Trust13 WSTrust13 `xml:"Trust13"` + SignedEncryptedSupportingTokens SignedEncryptedSupportingTokens `xml:"SignedEncryptedSupportingTokens"` +} + +type NegotiateAuthentication struct { + Text string `xml:",chardata"` + HTTP string `xml:"http,attr"` + XMLName xml.Name +} + +type TransportBinding struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy TransportBindingPolicy `xml:"Policy"` +} + +type TransportBindingPolicy struct { + Text string `xml:",chardata"` + TransportToken TransportToken `xml:"TransportToken"` + AlgorithmSuite AlgorithmSuite `xml:"AlgorithmSuite"` + Layout Layout `xml:"Layout"` + IncludeTimestamp Text `xml:"IncludeTimestamp"` +} + +type TransportToken struct { + Text string `xml:",chardata"` + Policy TransportTokenPolicy `xml:"Policy"` +} + +type TransportTokenPolicy struct { + Text string `xml:",chardata"` + HTTPSToken HTTPSToken `xml:"HttpsToken"` +} + +type HTTPSToken struct { + Text string `xml:",chardata"` + RequireClientCertificate string `xml:"RequireClientCertificate,attr"` +} + +type AlgorithmSuite struct { + Text string `xml:",chardata"` + Policy AlgorithmSuitePolicy `xml:"Policy"` +} + +type AlgorithmSuitePolicy struct { + Text string `xml:",chardata"` + Basic256 Text `xml:"Basic256"` + Basic128 Text `xml:"Basic128"` +} + +type Layout struct { + Text string `xml:",chardata"` + Policy LayoutPolicy `xml:"Policy"` +} + +type LayoutPolicy struct { + Text string `xml:",chardata"` + Strict Text `xml:"Strict"` +} + +type EndorsingSupportingTokens struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy EndorsingSupportingTokensPolicy `xml:"Policy"` +} + +type EndorsingSupportingTokensPolicy struct { + Text string `xml:",chardata"` + X509Token X509Token `xml:"X509Token"` + RSAToken RSAToken `xml:"RsaToken"` + SignedParts SignedParts `xml:"SignedParts"` + KerberosToken KerberosToken 
`xml:"KerberosToken"` + IssuedToken IssuedToken `xml:"IssuedToken"` + KeyValueToken KeyValueToken `xml:"KeyValueToken"` +} + +type X509Token struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + Policy X509TokenPolicy `xml:"Policy"` +} + +type X509TokenPolicy struct { + Text string `xml:",chardata"` + RequireThumbprintReference Text `xml:"RequireThumbprintReference"` + WSSX509V3Token10 Text `xml:"WssX509V3Token10"` +} + +type RSAToken struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + Optional string `xml:"Optional,attr"` + MSSP string `xml:"mssp,attr"` +} + +type SignedParts struct { + Text string `xml:",chardata"` + Header SignedPartsHeader `xml:"Header"` +} + +type SignedPartsHeader struct { + Text string `xml:",chardata"` + Name string `xml:"Name,attr"` + Namespace string `xml:"Namespace,attr"` +} + +type KerberosToken struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + Policy KerberosTokenPolicy `xml:"Policy"` +} + +type KerberosTokenPolicy struct { + Text string `xml:",chardata"` + WSSGSSKerberosV5ApReqToken11 Text `xml:"WssGssKerberosV5ApReqToken11"` +} + +type IssuedToken struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + RequestSecurityTokenTemplate RequestSecurityTokenTemplate `xml:"RequestSecurityTokenTemplate"` + Policy IssuedTokenPolicy `xml:"Policy"` +} + +type RequestSecurityTokenTemplate struct { + Text string `xml:",chardata"` + KeyType Text `xml:"KeyType"` + EncryptWith Text `xml:"EncryptWith"` + SignatureAlgorithm Text `xml:"SignatureAlgorithm"` + CanonicalizationAlgorithm Text `xml:"CanonicalizationAlgorithm"` + EncryptionAlgorithm Text `xml:"EncryptionAlgorithm"` + KeySize Text `xml:"KeySize"` + KeyWrapAlgorithm Text `xml:"KeyWrapAlgorithm"` +} + +type IssuedTokenPolicy struct { + Text string `xml:",chardata"` + RequireInternalReference Text `xml:"RequireInternalReference"` +} + +type KeyValueToken struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + Optional string `xml:"Optional,attr"` +} + +type WSS11 struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy Wss11Policy `xml:"Policy"` +} + +type Wss11Policy struct { + Text string `xml:",chardata"` + MustSupportRefThumbprint Text `xml:"MustSupportRefThumbprint"` +} + +type Trust10 struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy Trust10Policy `xml:"Policy"` +} + +type Trust10Policy struct { + Text string `xml:",chardata"` + MustSupportIssuedTokens Text `xml:"MustSupportIssuedTokens"` + RequireClientEntropy Text `xml:"RequireClientEntropy"` + RequireServerEntropy Text `xml:"RequireServerEntropy"` +} + +type SignedSupportingTokens struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy SupportingTokensPolicy `xml:"Policy"` +} + +type SupportingTokensPolicy struct { + Text string `xml:",chardata"` + UsernameToken UsernameToken `xml:"UsernameToken"` +} +type UsernameToken struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + Policy UsernameTokenPolicy `xml:"Policy"` +} + +type UsernameTokenPolicy struct { + Text string `xml:",chardata"` + WSSUsernameToken10 WSSUsernameToken10 `xml:"WssUsernameToken10"` +} + +type WSSUsernameToken10 struct { + Text string `xml:",chardata"` + XMLName xml.Name +} + +type WSTrust13 struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy WSTrust13Policy `xml:"Policy"` +} + 
+type WSTrust13Policy struct { + Text string `xml:",chardata"` + MustSupportIssuedTokens Text `xml:"MustSupportIssuedTokens"` + RequireClientEntropy Text `xml:"RequireClientEntropy"` + RequireServerEntropy Text `xml:"RequireServerEntropy"` +} + +type SignedEncryptedSupportingTokens struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy SupportingTokensPolicy `xml:"Policy"` +} + +type Types struct { + Text string `xml:",chardata"` + Schema Schema `xml:"schema"` +} + +type Schema struct { + Text string `xml:",chardata"` + TargetNamespace string `xml:"targetNamespace,attr"` + Import []Import `xml:"import"` +} + +type Import struct { + Text string `xml:",chardata"` + SchemaLocation string `xml:"schemaLocation,attr"` + Namespace string `xml:"namespace,attr"` +} + +type Message struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Part Part `xml:"part"` +} + +type Part struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Element string `xml:"element,attr"` +} + +type PortType struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Operation Operation `xml:"operation"` +} + +type Operation struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Input OperationIO `xml:"input"` + Output OperationIO `xml:"output"` +} + +type OperationIO struct { + Text string `xml:",chardata"` + Action string `xml:"Action,attr"` + Message string `xml:"message,attr"` + Body OperationIOBody `xml:"body"` +} + +type OperationIOBody struct { + Text string `xml:",chardata"` + Use string `xml:"use,attr"` +} + +type Binding struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Type string `xml:"type,attr"` + PolicyReference PolicyReference `xml:"PolicyReference"` + Binding DefinitionsBinding `xml:"binding"` + Operation BindingOperation `xml:"operation"` +} + +type PolicyReference struct { + Text string `xml:",chardata"` + URI string `xml:"URI,attr"` +} + +type DefinitionsBinding struct { + Text string `xml:",chardata"` + Transport string `xml:"transport,attr"` +} + +type BindingOperation struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Operation BindingOperationOperation `xml:"operation"` + Input BindingOperationIO `xml:"input"` + Output BindingOperationIO `xml:"output"` +} + +type BindingOperationOperation struct { + Text string `xml:",chardata"` + SoapAction string `xml:"soapAction,attr"` + Style string `xml:"style,attr"` +} + +type BindingOperationIO struct { + Text string `xml:",chardata"` + Body OperationIOBody `xml:"body"` +} + +type Service struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Port []Port `xml:"port"` +} + +type Port struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Binding string `xml:"binding,attr"` + Address Address `xml:"address"` + EndpointReference PortEndpointReference `xml:"EndpointReference"` +} + +type Address struct { + Text string `xml:",chardata"` + Location string `xml:"location,attr"` +} + +type PortEndpointReference struct { + Text string `xml:",chardata"` + Address Text `xml:"Address"` + Identity Identity `xml:"Identity"` +} + +type Identity struct { + Text string `xml:",chardata"` + XMLNS string `xml:"xmlns,attr"` + SPN Text `xml:"Spn"` +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/saml_assertion_definitions.go 
b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/saml_assertion_definitions.go new file mode 100644 index 00000000..7d072556 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/saml_assertion_definitions.go @@ -0,0 +1,230 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package defs + +import "encoding/xml" + +// TODO(msal): Someone (and it ain't gonna be me) needs to document these attributes or +// at the least put a link to RFC. + +type SAMLDefinitions struct { + XMLName xml.Name `xml:"Envelope"` + Text string `xml:",chardata"` + S string `xml:"s,attr"` + A string `xml:"a,attr"` + U string `xml:"u,attr"` + Header Header `xml:"Header"` + Body Body `xml:"Body"` +} + +type Header struct { + Text string `xml:",chardata"` + Action Action `xml:"Action"` + Security Security `xml:"Security"` +} + +type Action struct { + Text string `xml:",chardata"` + MustUnderstand string `xml:"mustUnderstand,attr"` +} + +type Security struct { + Text string `xml:",chardata"` + MustUnderstand string `xml:"mustUnderstand,attr"` + O string `xml:"o,attr"` + Timestamp Timestamp `xml:"Timestamp"` +} + +type Timestamp struct { + Text string `xml:",chardata"` + ID string `xml:"Id,attr"` + Created Text `xml:"Created"` + Expires Text `xml:"Expires"` +} + +type Text struct { + Text string `xml:",chardata"` +} + +type Body struct { + Text string `xml:",chardata"` + RequestSecurityTokenResponseCollection RequestSecurityTokenResponseCollection `xml:"RequestSecurityTokenResponseCollection"` +} + +type RequestSecurityTokenResponseCollection struct { + Text string `xml:",chardata"` + Trust string `xml:"trust,attr"` + RequestSecurityTokenResponse []RequestSecurityTokenResponse `xml:"RequestSecurityTokenResponse"` +} + +type RequestSecurityTokenResponse struct { + Text string `xml:",chardata"` + Lifetime Lifetime `xml:"Lifetime"` + AppliesTo AppliesTo `xml:"AppliesTo"` + RequestedSecurityToken RequestedSecurityToken `xml:"RequestedSecurityToken"` + RequestedAttachedReference RequestedAttachedReference `xml:"RequestedAttachedReference"` + RequestedUnattachedReference RequestedUnattachedReference `xml:"RequestedUnattachedReference"` + TokenType Text `xml:"TokenType"` + RequestType Text `xml:"RequestType"` + KeyType Text `xml:"KeyType"` +} + +type Lifetime struct { + Text string `xml:",chardata"` + Created WSUTimestamp `xml:"Created"` + Expires WSUTimestamp `xml:"Expires"` +} + +type WSUTimestamp struct { + Text string `xml:",chardata"` + Wsu string `xml:"wsu,attr"` +} + +type AppliesTo struct { + Text string `xml:",chardata"` + Wsp string `xml:"wsp,attr"` + EndpointReference EndpointReference `xml:"EndpointReference"` +} + +type EndpointReference struct { + Text string `xml:",chardata"` + Wsa string `xml:"wsa,attr"` + Address Text `xml:"Address"` +} + +type RequestedSecurityToken struct { + Text string `xml:",chardata"` + AssertionRawXML string `xml:",innerxml"` + Assertion Assertion `xml:"Assertion"` +} + +type Assertion struct { + XMLName xml.Name // Normally its `xml:"Assertion"`, but I think they want to capture the xmlns + Text string `xml:",chardata"` + MajorVersion string `xml:"MajorVersion,attr"` + MinorVersion string `xml:"MinorVersion,attr"` + AssertionID string `xml:"AssertionID,attr"` + Issuer string `xml:"Issuer,attr"` + IssueInstant string `xml:"IssueInstant,attr"` + Saml string `xml:"saml,attr"` + Conditions Conditions `xml:"Conditions"` + 
AttributeStatement AttributeStatement `xml:"AttributeStatement"` + AuthenticationStatement AuthenticationStatement `xml:"AuthenticationStatement"` + Signature Signature `xml:"Signature"` +} + +type Conditions struct { + Text string `xml:",chardata"` + NotBefore string `xml:"NotBefore,attr"` + NotOnOrAfter string `xml:"NotOnOrAfter,attr"` + AudienceRestrictionCondition AudienceRestrictionCondition `xml:"AudienceRestrictionCondition"` +} + +type AudienceRestrictionCondition struct { + Text string `xml:",chardata"` + Audience Text `xml:"Audience"` +} + +type AttributeStatement struct { + Text string `xml:",chardata"` + Subject Subject `xml:"Subject"` + Attribute []Attribute `xml:"Attribute"` +} + +type Subject struct { + Text string `xml:",chardata"` + NameIdentifier NameIdentifier `xml:"NameIdentifier"` + SubjectConfirmation SubjectConfirmation `xml:"SubjectConfirmation"` +} + +type NameIdentifier struct { + Text string `xml:",chardata"` + Format string `xml:"Format,attr"` +} + +type SubjectConfirmation struct { + Text string `xml:",chardata"` + ConfirmationMethod Text `xml:"ConfirmationMethod"` +} + +type Attribute struct { + Text string `xml:",chardata"` + AttributeName string `xml:"AttributeName,attr"` + AttributeNamespace string `xml:"AttributeNamespace,attr"` + AttributeValue Text `xml:"AttributeValue"` +} + +type AuthenticationStatement struct { + Text string `xml:",chardata"` + AuthenticationMethod string `xml:"AuthenticationMethod,attr"` + AuthenticationInstant string `xml:"AuthenticationInstant,attr"` + Subject Subject `xml:"Subject"` +} + +type Signature struct { + Text string `xml:",chardata"` + Ds string `xml:"ds,attr"` + SignedInfo SignedInfo `xml:"SignedInfo"` + SignatureValue Text `xml:"SignatureValue"` + KeyInfo KeyInfo `xml:"KeyInfo"` +} + +type SignedInfo struct { + Text string `xml:",chardata"` + CanonicalizationMethod Method `xml:"CanonicalizationMethod"` + SignatureMethod Method `xml:"SignatureMethod"` + Reference Reference `xml:"Reference"` +} + +type Method struct { + Text string `xml:",chardata"` + Algorithm string `xml:"Algorithm,attr"` +} + +type Reference struct { + Text string `xml:",chardata"` + URI string `xml:"URI,attr"` + Transforms Transforms `xml:"Transforms"` + DigestMethod Method `xml:"DigestMethod"` + DigestValue Text `xml:"DigestValue"` +} + +type Transforms struct { + Text string `xml:",chardata"` + Transform []Method `xml:"Transform"` +} + +type KeyInfo struct { + Text string `xml:",chardata"` + Xmlns string `xml:"xmlns,attr"` + X509Data X509Data `xml:"X509Data"` +} + +type X509Data struct { + Text string `xml:",chardata"` + X509Certificate Text `xml:"X509Certificate"` +} + +type RequestedAttachedReference struct { + Text string `xml:",chardata"` + SecurityTokenReference SecurityTokenReference `xml:"SecurityTokenReference"` +} + +type SecurityTokenReference struct { + Text string `xml:",chardata"` + TokenType string `xml:"TokenType,attr"` + O string `xml:"o,attr"` + K string `xml:"k,attr"` + KeyIdentifier KeyIdentifier `xml:"KeyIdentifier"` +} + +type KeyIdentifier struct { + Text string `xml:",chardata"` + ValueType string `xml:"ValueType,attr"` +} + +type RequestedUnattachedReference struct { + Text string `xml:",chardata"` + SecurityTokenReference SecurityTokenReference `xml:"SecurityTokenReference"` +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/version_string.go 
b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/version_string.go new file mode 100644 index 00000000..6fe5efa8 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/version_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type=Version"; DO NOT EDIT. + +package defs + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[TrustUnknown-0] + _ = x[Trust2005-1] + _ = x[Trust13-2] +} + +const _Version_name = "TrustUnknownTrust2005Trust13" + +var _Version_index = [...]uint8{0, 12, 21, 28} + +func (i Version) String() string { + if i < 0 || i >= Version(len(_Version_index)-1) { + return "Version(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Version_name[_Version_index[i]:_Version_index[i+1]] +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_endpoint.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_endpoint.go new file mode 100644 index 00000000..8fad5efb --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_endpoint.go @@ -0,0 +1,199 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package defs + +import ( + "encoding/xml" + "fmt" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + uuid "github.com/google/uuid" +) + +//go:generate stringer -type=Version + +type Version int + +const ( + TrustUnknown Version = iota + Trust2005 + Trust13 +) + +// Endpoint represents a WSTrust endpoint. +type Endpoint struct { + // Version is the version of the endpoint. + Version Version + // URL is the URL of the endpoint. 
+ URL string +} + +type wsTrustTokenRequestEnvelope struct { + XMLName xml.Name `xml:"s:Envelope"` + Text string `xml:",chardata"` + S string `xml:"xmlns:s,attr"` + Wsa string `xml:"xmlns:wsa,attr"` + Wsu string `xml:"xmlns:wsu,attr"` + Header struct { + Text string `xml:",chardata"` + Action struct { + Text string `xml:",chardata"` + MustUnderstand string `xml:"s:mustUnderstand,attr"` + } `xml:"wsa:Action"` + MessageID struct { + Text string `xml:",chardata"` + } `xml:"wsa:messageID"` + ReplyTo struct { + Text string `xml:",chardata"` + Address struct { + Text string `xml:",chardata"` + } `xml:"wsa:Address"` + } `xml:"wsa:ReplyTo"` + To struct { + Text string `xml:",chardata"` + MustUnderstand string `xml:"s:mustUnderstand,attr"` + } `xml:"wsa:To"` + Security struct { + Text string `xml:",chardata"` + MustUnderstand string `xml:"s:mustUnderstand,attr"` + Wsse string `xml:"xmlns:wsse,attr"` + Timestamp struct { + Text string `xml:",chardata"` + ID string `xml:"wsu:Id,attr"` + Created struct { + Text string `xml:",chardata"` + } `xml:"wsu:Created"` + Expires struct { + Text string `xml:",chardata"` + } `xml:"wsu:Expires"` + } `xml:"wsu:Timestamp"` + UsernameToken struct { + Text string `xml:",chardata"` + ID string `xml:"wsu:Id,attr"` + Username struct { + Text string `xml:",chardata"` + } `xml:"wsse:Username"` + Password struct { + Text string `xml:",chardata"` + } `xml:"wsse:Password"` + } `xml:"wsse:UsernameToken"` + } `xml:"wsse:Security"` + } `xml:"s:Header"` + Body struct { + Text string `xml:",chardata"` + RequestSecurityToken struct { + Text string `xml:",chardata"` + Wst string `xml:"xmlns:wst,attr"` + AppliesTo struct { + Text string `xml:",chardata"` + Wsp string `xml:"xmlns:wsp,attr"` + EndpointReference struct { + Text string `xml:",chardata"` + Address struct { + Text string `xml:",chardata"` + } `xml:"wsa:Address"` + } `xml:"wsa:EndpointReference"` + } `xml:"wsp:AppliesTo"` + KeyType struct { + Text string `xml:",chardata"` + } `xml:"wst:KeyType"` + RequestType struct { + Text string `xml:",chardata"` + } `xml:"wst:RequestType"` + } `xml:"wst:RequestSecurityToken"` + } `xml:"s:Body"` +} + +func buildTimeString(t time.Time) string { + // Golang time formats are weird: https://stackoverflow.com/questions/20234104/how-to-format-current-time-using-a-yyyymmddhhmmss-format + return t.Format("2006-01-02T15:04:05.000Z") +} + +func (wte *Endpoint) buildTokenRequestMessage(authType authority.AuthorizeType, cloudAudienceURN string, username string, password string) (string, error) { + var soapAction string + var trustNamespace string + var keyType string + var requestType string + + createdTime := time.Now().UTC() + expiresTime := createdTime.Add(10 * time.Minute) + + switch wte.Version { + case Trust2005: + soapAction = trust2005Spec + trustNamespace = "http://schemas.xmlsoap.org/ws/2005/02/trust" + keyType = "http://schemas.xmlsoap.org/ws/2005/05/identity/NoProofKey" + requestType = "http://schemas.xmlsoap.org/ws/2005/02/trust/Issue" + case Trust13: + soapAction = trust13Spec + trustNamespace = "http://docs.oasis-open.org/ws-sx/ws-trust/200512" + keyType = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/Bearer" + requestType = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/Issue" + default: + return "", fmt.Errorf("buildTokenRequestMessage had Version == %q, which is not recognized", wte.Version) + } + + var envelope wsTrustTokenRequestEnvelope + + messageUUID := uuid.New() + + envelope.S = "http://www.w3.org/2003/05/soap-envelope" + envelope.Wsa = 
"http://www.w3.org/2005/08/addressing" + envelope.Wsu = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd" + + envelope.Header.Action.MustUnderstand = "1" + envelope.Header.Action.Text = soapAction + envelope.Header.MessageID.Text = "urn:uuid:" + messageUUID.String() + envelope.Header.ReplyTo.Address.Text = "http://www.w3.org/2005/08/addressing/anonymous" + envelope.Header.To.MustUnderstand = "1" + envelope.Header.To.Text = wte.URL + + switch authType { + case authority.ATUnknown: + return "", fmt.Errorf("buildTokenRequestMessage had no authority type(%v)", authType) + case authority.ATUsernamePassword: + endpointUUID := uuid.New() + + var trustID string + if wte.Version == Trust2005 { + trustID = "UnPwSecTok2005-" + endpointUUID.String() + } else { + trustID = "UnPwSecTok13-" + endpointUUID.String() + } + + envelope.Header.Security.MustUnderstand = "1" + envelope.Header.Security.Wsse = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd" + envelope.Header.Security.Timestamp.ID = "MSATimeStamp" + envelope.Header.Security.Timestamp.Created.Text = buildTimeString(createdTime) + envelope.Header.Security.Timestamp.Expires.Text = buildTimeString(expiresTime) + envelope.Header.Security.UsernameToken.ID = trustID + envelope.Header.Security.UsernameToken.Username.Text = username + envelope.Header.Security.UsernameToken.Password.Text = password + default: + // This is just to note that we don't do anything for other cases. + // We aren't missing anything I know of. + } + + envelope.Body.RequestSecurityToken.Wst = trustNamespace + envelope.Body.RequestSecurityToken.AppliesTo.Wsp = "http://schemas.xmlsoap.org/ws/2004/09/policy" + envelope.Body.RequestSecurityToken.AppliesTo.EndpointReference.Address.Text = cloudAudienceURN + envelope.Body.RequestSecurityToken.KeyType.Text = keyType + envelope.Body.RequestSecurityToken.RequestType.Text = requestType + + output, err := xml.Marshal(envelope) + if err != nil { + return "", err + } + + return string(output), nil +} + +func (wte *Endpoint) BuildTokenRequestMessageWIA(cloudAudienceURN string) (string, error) { + return wte.buildTokenRequestMessage(authority.ATWindowsIntegrated, cloudAudienceURN, "", "") +} + +func (wte *Endpoint) BuildTokenRequestMessageUsernamePassword(cloudAudienceURN string, username string, password string) (string, error) { + return wte.buildTokenRequestMessage(authority.ATUsernamePassword, cloudAudienceURN, username, password) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_mex_document.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_mex_document.go new file mode 100644 index 00000000..e3d19886 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_mex_document.go @@ -0,0 +1,159 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +package defs + +import ( + "errors" + "fmt" + "strings" +) + +//go:generate stringer -type=endpointType + +type endpointType int + +const ( + etUnknown endpointType = iota + etUsernamePassword + etWindowsTransport +) + +type wsEndpointData struct { + Version Version + EndpointType endpointType +} + +const trust13Spec string = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/RST/Issue" +const trust2005Spec string = "http://schemas.xmlsoap.org/ws/2005/02/trust/RST/Issue" + +type MexDocument struct { + UsernamePasswordEndpoint Endpoint + WindowsTransportEndpoint Endpoint + policies map[string]endpointType + bindings map[string]wsEndpointData +} + +func updateEndpoint(cached *Endpoint, found Endpoint) { + if cached == nil || cached.Version == TrustUnknown { + *cached = found + return + } + if (*cached).Version == Trust2005 && found.Version == Trust13 { + *cached = found + return + } +} + +// TODO(msal): Someone needs to write tests for everything below. + +// NewFromDef creates a new MexDocument. +func NewFromDef(defs Definitions) (MexDocument, error) { + policies, err := policies(defs) + if err != nil { + return MexDocument{}, err + } + + bindings, err := bindings(defs, policies) + if err != nil { + return MexDocument{}, err + } + + userPass, windows, err := endpoints(defs, bindings) + if err != nil { + return MexDocument{}, err + } + + return MexDocument{ + UsernamePasswordEndpoint: userPass, + WindowsTransportEndpoint: windows, + policies: policies, + bindings: bindings, + }, nil +} + +func policies(defs Definitions) (map[string]endpointType, error) { + policies := make(map[string]endpointType, len(defs.Policy)) + + for _, policy := range defs.Policy { + if policy.ExactlyOne.All.NegotiateAuthentication.XMLName.Local != "" { + if policy.ExactlyOne.All.TransportBinding.SP != "" && policy.ID != "" { + policies["#"+policy.ID] = etWindowsTransport + } + } + + if policy.ExactlyOne.All.SignedEncryptedSupportingTokens.Policy.UsernameToken.Policy.WSSUsernameToken10.XMLName.Local != "" { + if policy.ExactlyOne.All.TransportBinding.SP != "" && policy.ID != "" { + policies["#"+policy.ID] = etUsernamePassword + } + } + if policy.ExactlyOne.All.SignedSupportingTokens.Policy.UsernameToken.Policy.WSSUsernameToken10.XMLName.Local != "" { + if policy.ExactlyOne.All.TransportBinding.SP != "" && policy.ID != "" { + policies["#"+policy.ID] = etUsernamePassword + } + } + } + + if len(policies) == 0 { + return policies, errors.New("no policies for mex document") + } + + return policies, nil +} + +func bindings(defs Definitions, policies map[string]endpointType) (map[string]wsEndpointData, error) { + bindings := make(map[string]wsEndpointData, len(defs.Binding)) + + for _, binding := range defs.Binding { + policyName := binding.PolicyReference.URI + transport := binding.Binding.Transport + + if transport == "http://schemas.xmlsoap.org/soap/http" { + if policy, ok := policies[policyName]; ok { + bindingName := binding.Name + specVersion := binding.Operation.Operation.SoapAction + + if specVersion == trust13Spec { + bindings[bindingName] = wsEndpointData{Trust13, policy} + } else if specVersion == trust2005Spec { + bindings[bindingName] = wsEndpointData{Trust2005, policy} + } else { + return nil, errors.New("found unknown spec version in mex document") + } + } + } + } + return bindings, nil +} + +func endpoints(defs Definitions, bindings map[string]wsEndpointData) (userPass, windows Endpoint, err error) { + for _, port := range defs.Service.Port { + bindingName := port.Binding + + index := 
strings.Index(bindingName, ":") + if index != -1 { + bindingName = bindingName[index+1:] + } + + if binding, ok := bindings[bindingName]; ok { + url := strings.TrimSpace(port.EndpointReference.Address.Text) + if url == "" { + return Endpoint{}, Endpoint{}, fmt.Errorf("MexDocument cannot have blank URL endpoint") + } + if binding.Version == TrustUnknown { + return Endpoint{}, Endpoint{}, fmt.Errorf("endpoint version unknown") + } + endpoint := Endpoint{Version: binding.Version, URL: url} + + switch binding.EndpointType { + case etUsernamePassword: + updateEndpoint(&userPass, endpoint) + case etWindowsTransport: + updateEndpoint(&windows, endpoint) + default: + return Endpoint{}, Endpoint{}, errors.New("found unknown port type in MEX document") + } + } + } + return userPass, windows, nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/wstrust.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/wstrust.go new file mode 100644 index 00000000..47cd4c69 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/wstrust.go @@ -0,0 +1,136 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/* +Package wstrust provides a client for communicating with a WSTrust (https://en.wikipedia.org/wiki/WS-Trust#:~:text=WS%2DTrust%20is%20a%20WS,in%20a%20secure%20message%20exchange.) +for the purposes of extracting metadata from the service. This data can be used to acquire +tokens using the accesstokens.Client.GetAccessTokenFromSamlGrant() call. +*/ +package wstrust + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs" +) + +type xmlCaller interface { + XMLCall(ctx context.Context, endpoint string, headers http.Header, qv url.Values, resp interface{}) error + SOAPCall(ctx context.Context, endpoint, action string, headers http.Header, qv url.Values, body string, resp interface{}) error +} + +type SamlTokenInfo struct { + AssertionType string // Should be either constants SAMLV1Grant or SAMLV2Grant. + Assertion string +} + +// Client represents the REST calls to get tokens from token generator backends. +type Client struct { + // Comm provides the HTTP transport client. + Comm xmlCaller +} + +// TODO(msal): This allows me to call Mex without having a real Def file on line 45. +// This would fail because policies() would not find a policy. This is easy enough to +// fix in test data, but.... Definitions is defined with built in structs. That needs +// to be pulled apart and until then I have this hack in. +var newFromDef = defs.NewFromDef + +// Mex provides metadata about a wstrust service. +func (c Client) Mex(ctx context.Context, federationMetadataURL string) (defs.MexDocument, error) { + resp := defs.Definitions{} + err := c.Comm.XMLCall( + ctx, + federationMetadataURL, + http.Header{}, + nil, + &resp, + ) + if err != nil { + return defs.MexDocument{}, err + } + + return newFromDef(resp) +} + +const ( + SoapActionDefault = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/RST/Issue" + + // Note: Commented out because this action is not supported. 
It was in the original code + // but only used in a switch where it errored. Since there was only one value, a default + // worked better. However, buildTokenRequestMessage() had 2005 support. I'm not actually + // sure what's going on here. It like we have half support. For now this is here just + // for documentation purposes in case we are going to add support. + // + // SoapActionWSTrust2005 = "http://schemas.xmlsoap.org/ws/2005/02/trust/RST/Issue" +) + +// SAMLTokenInfo provides SAML information that is used to generate a SAML token. +func (c Client) SAMLTokenInfo(ctx context.Context, authParameters authority.AuthParams, cloudAudienceURN string, endpoint defs.Endpoint) (SamlTokenInfo, error) { + var wsTrustRequestMessage string + var err error + + switch authParameters.AuthorizationType { + case authority.ATWindowsIntegrated: + wsTrustRequestMessage, err = endpoint.BuildTokenRequestMessageWIA(cloudAudienceURN) + if err != nil { + return SamlTokenInfo{}, err + } + case authority.ATUsernamePassword: + wsTrustRequestMessage, err = endpoint.BuildTokenRequestMessageUsernamePassword( + cloudAudienceURN, authParameters.Username, authParameters.Password) + if err != nil { + return SamlTokenInfo{}, err + } + default: + return SamlTokenInfo{}, fmt.Errorf("unknown auth type %v", authParameters.AuthorizationType) + } + + var soapAction string + switch endpoint.Version { + case defs.Trust13: + soapAction = SoapActionDefault + case defs.Trust2005: + return SamlTokenInfo{}, errors.New("WS Trust 2005 support is not implemented") + default: + return SamlTokenInfo{}, fmt.Errorf("the SOAP endpoint for a wstrust call had an invalid version: %v", endpoint.Version) + } + + resp := defs.SAMLDefinitions{} + err = c.Comm.SOAPCall(ctx, endpoint.URL, soapAction, http.Header{}, nil, wsTrustRequestMessage, &resp) + if err != nil { + return SamlTokenInfo{}, err + } + + return c.samlAssertion(resp) +} + +const ( + samlv1Assertion = "urn:oasis:names:tc:SAML:1.0:assertion" + samlv2Assertion = "urn:oasis:names:tc:SAML:2.0:assertion" +) + +func (c Client) samlAssertion(def defs.SAMLDefinitions) (SamlTokenInfo, error) { + for _, tokenResponse := range def.Body.RequestSecurityTokenResponseCollection.RequestSecurityTokenResponse { + token := tokenResponse.RequestedSecurityToken + if token.Assertion.XMLName.Local != "" { + assertion := token.AssertionRawXML + + samlVersion := token.Assertion.Saml + switch samlVersion { + case samlv1Assertion: + return SamlTokenInfo{AssertionType: grant.SAMLV1, Assertion: assertion}, nil + case samlv2Assertion: + return SamlTokenInfo{AssertionType: grant.SAMLV2, Assertion: assertion}, nil + } + return SamlTokenInfo{}, fmt.Errorf("couldn't parse SAML assertion, version unknown: %q", samlVersion) + } + } + return SamlTokenInfo{}, errors.New("unknown WS-Trust version") +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go new file mode 100644 index 00000000..0ade4117 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go @@ -0,0 +1,149 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// TODO(msal): Write some tests. The original code this came from didn't have tests and I'm too +// tired at this point to do it. It, like many other *Manager code I found was broken because +// they didn't have mutex protection. 
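// Illustrative sketch (not part of the vendored upstream file): the WS-Trust
// flow that the wstrust.Client defined above supports, driven through the
// ops.REST facade. It assumes authParams carries ATUsernamePassword
// authorization plus the user's credentials, and that federationMetadataURL and
// cloudAudienceURN were obtained from user realm discovery.
func wsTrustAssertionSketch(ctx context.Context, rest *ops.REST, authParams authority.AuthParams, federationMetadataURL, cloudAudienceURN string) (string, error) {
	mex, err := rest.WSTrust().Mex(ctx, federationMetadataURL)
	if err != nil {
		return "", err
	}
	info, err := rest.WSTrust().SAMLTokenInfo(ctx, authParams, cloudAudienceURN, mex.UsernamePasswordEndpoint)
	if err != nil {
		return "", err
	}
	return info.Assertion, nil // raw SAML assertion XML, exchanged later via the SAML grant
}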
+ +package oauth + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" +) + +// ADFS is an active directory federation service authority type. +const ADFS = "ADFS" + +type cacheEntry struct { + Endpoints authority.Endpoints + ValidForDomainsInList map[string]bool +} + +func createcacheEntry(endpoints authority.Endpoints) cacheEntry { + return cacheEntry{endpoints, map[string]bool{}} +} + +// AuthorityEndpoint retrieves endpoints from an authority for auth and token acquisition. +type authorityEndpoint struct { + rest *ops.REST + + mu sync.Mutex + cache map[string]cacheEntry +} + +// newAuthorityEndpoint is the constructor for AuthorityEndpoint. +func newAuthorityEndpoint(rest *ops.REST) *authorityEndpoint { + m := &authorityEndpoint{rest: rest, cache: map[string]cacheEntry{}} + return m +} + +// ResolveEndpoints gets the authorization and token endpoints and creates an AuthorityEndpoints instance +func (m *authorityEndpoint) ResolveEndpoints(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (authority.Endpoints, error) { + + if endpoints, found := m.cachedEndpoints(authorityInfo, userPrincipalName); found { + return endpoints, nil + } + + endpoint, err := m.openIDConfigurationEndpoint(ctx, authorityInfo, userPrincipalName) + if err != nil { + return authority.Endpoints{}, err + } + + resp, err := m.rest.Authority().GetTenantDiscoveryResponse(ctx, endpoint) + if err != nil { + return authority.Endpoints{}, err + } + if err := resp.Validate(); err != nil { + return authority.Endpoints{}, fmt.Errorf("ResolveEndpoints(): %w", err) + } + + tenant := authorityInfo.Tenant + + endpoints := authority.NewEndpoints( + strings.Replace(resp.AuthorizationEndpoint, "{tenant}", tenant, -1), + strings.Replace(resp.TokenEndpoint, "{tenant}", tenant, -1), + strings.Replace(resp.Issuer, "{tenant}", tenant, -1), + authorityInfo.Host) + + m.addCachedEndpoints(authorityInfo, userPrincipalName, endpoints) + + return endpoints, nil +} + +// cachedEndpoints returns a the cached endpoints if they exists. If not, we return false. +func (m *authorityEndpoint) cachedEndpoints(authorityInfo authority.Info, userPrincipalName string) (authority.Endpoints, bool) { + m.mu.Lock() + defer m.mu.Unlock() + + if cacheEntry, ok := m.cache[authorityInfo.CanonicalAuthorityURI]; ok { + if authorityInfo.AuthorityType == ADFS { + domain, err := adfsDomainFromUpn(userPrincipalName) + if err == nil { + if _, ok := cacheEntry.ValidForDomainsInList[domain]; ok { + return cacheEntry.Endpoints, true + } + } + } + return cacheEntry.Endpoints, true + } + return authority.Endpoints{}, false +} + +func (m *authorityEndpoint) addCachedEndpoints(authorityInfo authority.Info, userPrincipalName string, endpoints authority.Endpoints) { + m.mu.Lock() + defer m.mu.Unlock() + + updatedCacheEntry := createcacheEntry(endpoints) + + if authorityInfo.AuthorityType == ADFS { + // Since we're here, we've made a call to the backend. We want to ensure we're caching + // the latest values from the server. 
+ if cacheEntry, ok := m.cache[authorityInfo.CanonicalAuthorityURI]; ok { + for k := range cacheEntry.ValidForDomainsInList { + updatedCacheEntry.ValidForDomainsInList[k] = true + } + } + domain, err := adfsDomainFromUpn(userPrincipalName) + if err == nil { + updatedCacheEntry.ValidForDomainsInList[domain] = true + } + } + + m.cache[authorityInfo.CanonicalAuthorityURI] = updatedCacheEntry +} + +func (m *authorityEndpoint) openIDConfigurationEndpoint(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (string, error) { + if authorityInfo.Tenant == "adfs" { + return fmt.Sprintf("https://%s/adfs/.well-known/openid-configuration", authorityInfo.Host), nil + } else if authorityInfo.ValidateAuthority && !authority.TrustedHost(authorityInfo.Host) { + resp, err := m.rest.Authority().AADInstanceDiscovery(ctx, authorityInfo) + if err != nil { + return "", err + } + return resp.TenantDiscoveryEndpoint, nil + } else if authorityInfo.Region != "" { + resp, err := m.rest.Authority().AADInstanceDiscovery(ctx, authorityInfo) + if err != nil { + return "", err + } + return resp.TenantDiscoveryEndpoint, nil + + } + + return authorityInfo.CanonicalAuthorityURI + "v2.0/.well-known/openid-configuration", nil +} + +func adfsDomainFromUpn(userPrincipalName string) (string, error) { + parts := strings.Split(userPrincipalName, "@") + if len(parts) < 2 { + return "", errors.New("no @ present in user principal name") + } + return parts[1], nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options/options.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options/options.go new file mode 100644 index 00000000..4561d72d --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options/options.go @@ -0,0 +1,52 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package options + +import ( + "errors" + "fmt" +) + +// CallOption implements an optional argument to a method call. See +// https://blog.devgenius.io/go-call-option-that-can-be-used-with-multiple-methods-6c81734f3dbe +// for an explanation of the usage pattern. +type CallOption interface { + Do(any) error + callOption() +} + +// ApplyOptions applies all the callOptions to options. options must be a pointer to a struct and +// callOptions must be a list of objects that implement CallOption. +func ApplyOptions[O, C any](options O, callOptions []C) error { + for _, o := range callOptions { + if t, ok := any(o).(CallOption); !ok { + return fmt.Errorf("unexpected option type %T", o) + } else if err := t.Do(options); err != nil { + return err + } + } + return nil +} + +// NewCallOption returns a new CallOption whose Do() method calls function "f". +func NewCallOption(f func(any) error) CallOption { + if f == nil { + // This isn't a practical concern because only an MSAL maintainer can get + // us here, by implementing a do-nothing option. But if someone does that, + // the below ensures the method invoked with the option returns an error. 
+ return callOption(func(any) error { + return errors.New("invalid option: missing implementation") + }) + } + return callOption(f) +} + +// callOption is an adapter for a function to a CallOption +type callOption func(any) error + +func (c callOption) Do(a any) error { + return c(a) +} + +func (callOption) callOption() {} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared/shared.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared/shared.go new file mode 100644 index 00000000..f7e12a71 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared/shared.go @@ -0,0 +1,71 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package shared + +import ( + "net/http" + "reflect" + "strings" +) + +const ( + // CacheKeySeparator is used in creating the keys of the cache. + CacheKeySeparator = "-" +) + +type Account struct { + HomeAccountID string `json:"home_account_id,omitempty"` + Environment string `json:"environment,omitempty"` + Realm string `json:"realm,omitempty"` + LocalAccountID string `json:"local_account_id,omitempty"` + AuthorityType string `json:"authority_type,omitempty"` + PreferredUsername string `json:"username,omitempty"` + GivenName string `json:"given_name,omitempty"` + FamilyName string `json:"family_name,omitempty"` + MiddleName string `json:"middle_name,omitempty"` + Name string `json:"name,omitempty"` + AlternativeID string `json:"alternative_account_id,omitempty"` + RawClientInfo string `json:"client_info,omitempty"` + UserAssertionHash string `json:"user_assertion_hash,omitempty"` + + AdditionalFields map[string]interface{} +} + +// NewAccount creates an account. +func NewAccount(homeAccountID, env, realm, localAccountID, authorityType, username string) Account { + return Account{ + HomeAccountID: homeAccountID, + Environment: env, + Realm: realm, + LocalAccountID: localAccountID, + AuthorityType: authorityType, + PreferredUsername: username, + } +} + +// Key creates the key for storing accounts in the cache. +func (acc Account) Key() string { + return strings.Join([]string{acc.HomeAccountID, acc.Environment, acc.Realm}, CacheKeySeparator) +} + +// IsZero checks the zero value of account. +func (acc Account) IsZero() bool { + v := reflect.ValueOf(acc) + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + if !field.IsZero() { + switch field.Kind() { + case reflect.Map, reflect.Slice: + if field.Len() == 0 { + continue + } + } + return false + } + } + return true +} + +// DefaultClient is our default shared HTTP client. +var DefaultClient = &http.Client{} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go new file mode 100644 index 00000000..b76c0c56 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go @@ -0,0 +1,8 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package version keeps the version number of the client package. +package version + +// Version is the version of this client package that is communicated to the server. 
+const Version = "1.0.0"
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go
new file mode 100644
index 00000000..cce05277
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go
@@ -0,0 +1,683 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+/*
+Package public provides a client for authentication of "public" applications. A "public"
+application is defined as an app that runs on client devices (android, ios, windows, linux, ...).
+These devices are "untrusted" and access resources via web APIs that must authenticate.
+*/
+package public
+
+/*
+Design note:
+
+public.Client uses client.Base as an embedded type. client.Base statically assigns its attributes
+during creation. As it doesn't have any pointers in it, anything borrowed from it, such as
+Base.AuthParams, is a copy that is free to be manipulated here.
+*/
+
+// TODO(msal): This should have example code for each method on client using Go's example doc framework.
+// base usage details should be included in the package documentation.
+
+import (
+	"context"
+	"crypto/rand"
+	"crypto/sha256"
+	"encoding/base64"
+	"fmt"
+	"net/url"
+	"strconv"
+
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared"
+	"github.com/google/uuid"
+	"github.com/pkg/browser"
+)
+
+// AuthResult contains the results of one token acquisition operation.
+// For details see https://aka.ms/msal-net-authenticationresult
+type AuthResult = base.AuthResult
+
+type Account = shared.Account
+
+// clientOptions configures the Client's behavior.
+type clientOptions struct {
+	accessor                 cache.ExportReplace
+	authority                string
+	capabilities             []string
+	disableInstanceDiscovery bool
+	httpClient               ops.HTTPClient
+}
+
+func (p *clientOptions) validate() error {
+	u, err := url.Parse(p.authority)
+	if err != nil {
+		return fmt.Errorf("Authority options cannot be URL parsed: %w", err)
+	}
+	if u.Scheme != "https" {
+		return fmt.Errorf("Authority(%s) did not start with https://", u.String())
+	}
+	return nil
+}
+
+// Option is an optional argument to the New constructor.
+type Option func(o *clientOptions)
+
+// WithAuthority allows for a custom authority to be set. This must be a valid https url.
+func WithAuthority(authority string) Option {
+	return func(o *clientOptions) {
+		o.authority = authority
+	}
+}
+
+// WithCache provides an accessor that will read and write authentication data to an externally managed cache.
+func WithCache(accessor cache.ExportReplace) Option { + return func(o *clientOptions) { + o.accessor = accessor + } +} + +// WithClientCapabilities allows configuring one or more client capabilities such as "CP1" +func WithClientCapabilities(capabilities []string) Option { + return func(o *clientOptions) { + // there's no danger of sharing the slice's underlying memory with the application because + // this slice is simply passed to base.WithClientCapabilities, which copies its data + o.capabilities = capabilities + } +} + +// WithHTTPClient allows for a custom HTTP client to be set. +func WithHTTPClient(httpClient ops.HTTPClient) Option { + return func(o *clientOptions) { + o.httpClient = httpClient + } +} + +// WithInstanceDiscovery set to false to disable authority validation (to support private cloud scenarios) +func WithInstanceDiscovery(enabled bool) Option { + return func(o *clientOptions) { + o.disableInstanceDiscovery = !enabled + } +} + +// Client is a representation of authentication client for public applications as defined in the +// package doc. For more information, visit https://docs.microsoft.com/azure/active-directory/develop/msal-client-applications. +type Client struct { + base base.Client +} + +// New is the constructor for Client. +func New(clientID string, options ...Option) (Client, error) { + opts := clientOptions{ + authority: base.AuthorityPublicCloud, + httpClient: shared.DefaultClient, + } + + for _, o := range options { + o(&opts) + } + if err := opts.validate(); err != nil { + return Client{}, err + } + + base, err := base.New(clientID, opts.authority, oauth.New(opts.httpClient), base.WithCacheAccessor(opts.accessor), base.WithClientCapabilities(opts.capabilities), base.WithInstanceDiscovery(!opts.disableInstanceDiscovery)) + if err != nil { + return Client{}, err + } + return Client{base}, nil +} + +// authCodeURLOptions contains options for AuthCodeURL +type authCodeURLOptions struct { + claims, loginHint, tenantID, domainHint string +} + +// AuthCodeURLOption is implemented by options for AuthCodeURL +type AuthCodeURLOption interface { + authCodeURLOption() +} + +// AuthCodeURL creates a URL used to acquire an authorization code. +// +// Options: [WithClaims], [WithDomainHint], [WithLoginHint], [WithTenantID] +func (pca Client) AuthCodeURL(ctx context.Context, clientID, redirectURI string, scopes []string, opts ...AuthCodeURLOption) (string, error) { + o := authCodeURLOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return "", err + } + ap, err := pca.base.AuthParams.WithTenant(o.tenantID) + if err != nil { + return "", err + } + ap.Claims = o.claims + ap.LoginHint = o.loginHint + ap.DomainHint = o.domainHint + return pca.base.AuthCodeURL(ctx, clientID, redirectURI, scopes, ap) +} + +// WithClaims sets additional claims to request for the token, such as those required by conditional access policies. +// Use this option when Azure AD returned a claims challenge for a prior request. The argument must be decoded. +// This option is valid for any token acquisition method. 
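As a usage sketch of the constructor and AuthCodeURL above (the client ID, authority, redirect URI, and scope below are placeholders, not values taken from this patch):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/public"
)

func main() {
	// Hypothetical client ID and a public-cloud authority; WithAuthority requires an https URL.
	client, err := public.New("00000000-0000-0000-0000-000000000000",
		public.WithAuthority("https://login.microsoftonline.com/common"))
	if err != nil {
		log.Fatal(err)
	}
	// Build the URL a user would visit to grant an authorization code.
	u, err := client.AuthCodeURL(context.Background(),
		"00000000-0000-0000-0000-000000000000", // client ID
		"http://localhost:8080",                // redirect URI
		[]string{"user.read"})                  // placeholder scope
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(u)
}
```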
+func WithClaims(claims string) interface { + AcquireByAuthCodeOption + AcquireByDeviceCodeOption + AcquireByUsernamePasswordOption + AcquireInteractiveOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption +} { + return struct { + AcquireByAuthCodeOption + AcquireByDeviceCodeOption + AcquireByUsernamePasswordOption + AcquireInteractiveOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenByAuthCodeOptions: + t.claims = claims + case *acquireTokenByDeviceCodeOptions: + t.claims = claims + case *acquireTokenByUsernamePasswordOptions: + t.claims = claims + case *acquireTokenSilentOptions: + t.claims = claims + case *authCodeURLOptions: + t.claims = claims + case *interactiveAuthOptions: + t.claims = claims + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// WithTenantID specifies a tenant for a single authentication. It may be different than the tenant set in [New] by [WithAuthority]. +// This option is valid for any token acquisition method. +func WithTenantID(tenantID string) interface { + AcquireByAuthCodeOption + AcquireByDeviceCodeOption + AcquireByUsernamePasswordOption + AcquireInteractiveOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption +} { + return struct { + AcquireByAuthCodeOption + AcquireByDeviceCodeOption + AcquireByUsernamePasswordOption + AcquireInteractiveOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenByAuthCodeOptions: + t.tenantID = tenantID + case *acquireTokenByDeviceCodeOptions: + t.tenantID = tenantID + case *acquireTokenByUsernamePasswordOptions: + t.tenantID = tenantID + case *acquireTokenSilentOptions: + t.tenantID = tenantID + case *authCodeURLOptions: + t.tenantID = tenantID + case *interactiveAuthOptions: + t.tenantID = tenantID + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// acquireTokenSilentOptions are all the optional settings to an AcquireTokenSilent() call. +// These are set by using various AcquireTokenSilentOption functions. +type acquireTokenSilentOptions struct { + account Account + claims, tenantID string +} + +// AcquireSilentOption is implemented by options for AcquireTokenSilent +type AcquireSilentOption interface { + acquireSilentOption() +} + +// WithSilentAccount uses the passed account during an AcquireTokenSilent() call. +func WithSilentAccount(account Account) interface { + AcquireSilentOption + options.CallOption +} { + return struct { + AcquireSilentOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenSilentOptions: + t.account = account + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// AcquireTokenSilent acquires a token from either the cache or using a refresh token. 
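A sketch of how the silent flow is typically combined with the cached accounts and the WithSilentAccount option; the helper name acquireSilently is hypothetical, and Accounts and AcquireTokenSilent are defined later in this file:

```go
package example

import (
	"context"
	"errors"

	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/public"
)

// acquireSilently tries the token cache (and, if needed, a refresh token) for
// the first cached account, and reports when an interactive flow is required.
func acquireSilently(ctx context.Context, client public.Client, scopes []string) (public.AuthResult, error) {
	accounts, err := client.Accounts(ctx)
	if err != nil {
		return public.AuthResult{}, err
	}
	if len(accounts) == 0 {
		return public.AuthResult{}, errors.New("no cached account: run an interactive flow first")
	}
	// A real application would let the user choose among the cached accounts.
	return client.AcquireTokenSilent(ctx, scopes, public.WithSilentAccount(accounts[0]))
}
```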
+//
+// Options: [WithClaims], [WithSilentAccount], [WithTenantID]
+func (pca Client) AcquireTokenSilent(ctx context.Context, scopes []string, opts ...AcquireSilentOption) (AuthResult, error) {
+	o := acquireTokenSilentOptions{}
+	if err := options.ApplyOptions(&o, opts); err != nil {
+		return AuthResult{}, err
+	}
+
+	silentParameters := base.AcquireTokenSilentParameters{
+		Scopes:      scopes,
+		Account:     o.account,
+		Claims:      o.claims,
+		RequestType: accesstokens.ATPublic,
+		IsAppCache:  false,
+		TenantID:    o.tenantID,
+	}
+
+	return pca.base.AcquireTokenSilent(ctx, silentParameters)
+}
+
+// acquireTokenByUsernamePasswordOptions contains optional configuration for AcquireTokenByUsernamePassword
+type acquireTokenByUsernamePasswordOptions struct {
+	claims, tenantID string
+}
+
+// AcquireByUsernamePasswordOption is implemented by options for AcquireTokenByUsernamePassword
+type AcquireByUsernamePasswordOption interface {
+	acquireByUsernamePasswordOption()
+}
+
+// AcquireTokenByUsernamePassword acquires a security token from the authority, via Username/Password Authentication.
+// NOTE: this flow is NOT recommended.
+//
+// Options: [WithClaims], [WithTenantID]
+func (pca Client) AcquireTokenByUsernamePassword(ctx context.Context, scopes []string, username, password string, opts ...AcquireByUsernamePasswordOption) (AuthResult, error) {
+	o := acquireTokenByUsernamePasswordOptions{}
+	if err := options.ApplyOptions(&o, opts); err != nil {
+		return AuthResult{}, err
+	}
+	authParams, err := pca.base.AuthParams.WithTenant(o.tenantID)
+	if err != nil {
+		return AuthResult{}, err
+	}
+	authParams.Scopes = scopes
+	authParams.AuthorizationType = authority.ATUsernamePassword
+	authParams.Claims = o.claims
+	authParams.Username = username
+	authParams.Password = password
+
+	token, err := pca.base.Token.UsernamePassword(ctx, authParams)
+	if err != nil {
+		return AuthResult{}, err
+	}
+	return pca.base.AuthResultFromToken(ctx, authParams, token, true)
+}
+
+type DeviceCodeResult = accesstokens.DeviceCodeResult
+
+// DeviceCode provides the results of the device code flow's first stage (containing the code)
+// that must be entered on the second device and provides a method to retrieve the AuthenticationResult
+// once that code has been entered and verified.
+type DeviceCode struct {
+	// Result holds the information about the device code (such as the code).
+	Result DeviceCodeResult
+
+	authParams authority.AuthParams
+	client     Client
+	dc         oauth.DeviceCode
+}
+
+// AuthenticationResult retrieves the AuthenticationResult once the user enters the code
+// on the second device. Until then it blocks until the .AcquireTokenByDeviceCode() context
+// is cancelled or the token expires.
+func (d DeviceCode) AuthenticationResult(ctx context.Context) (AuthResult, error) {
+	token, err := d.dc.Token(ctx)
+	if err != nil {
+		return AuthResult{}, err
+	}
+	return d.client.base.AuthResultFromToken(ctx, d.authParams, token, true)
+}
+
+// acquireTokenByDeviceCodeOptions contains optional configuration for AcquireTokenByDeviceCode
+type acquireTokenByDeviceCodeOptions struct {
+	claims, tenantID string
+}
+
+// AcquireByDeviceCodeOption is implemented by options for AcquireTokenByDeviceCode
+type AcquireByDeviceCodeOption interface {
+	acquireByDeviceCodeOptions()
+}
+
+// AcquireTokenByDeviceCode acquires a security token from the authority, by acquiring a device code and using that to acquire the token.
+// Users need to create an AcquireTokenDeviceCodeParameters instance and pass it in.
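A sketch of the device code flow built from the pieces above; deviceCodeFlow is a hypothetical helper, and the contents of Result come from accesstokens.DeviceCodeResult and are printed generically here:

```go
package example

import (
	"context"
	"fmt"

	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/public"
)

// deviceCodeFlow starts the flow, shows the user the code to enter on a second
// device, then blocks until the code is verified or ctx is cancelled.
func deviceCodeFlow(ctx context.Context, client public.Client, scopes []string) (public.AuthResult, error) {
	dc, err := client.AcquireTokenByDeviceCode(ctx, scopes)
	if err != nil {
		return public.AuthResult{}, err
	}
	fmt.Printf("%+v\n", dc.Result) // instructions the user needs to complete sign-in
	return dc.AuthenticationResult(ctx)
}
```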
+// +// Options: [WithClaims], [WithTenantID] +func (pca Client) AcquireTokenByDeviceCode(ctx context.Context, scopes []string, opts ...AcquireByDeviceCodeOption) (DeviceCode, error) { + o := acquireTokenByDeviceCodeOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return DeviceCode{}, err + } + authParams, err := pca.base.AuthParams.WithTenant(o.tenantID) + if err != nil { + return DeviceCode{}, err + } + authParams.Scopes = scopes + authParams.AuthorizationType = authority.ATDeviceCode + authParams.Claims = o.claims + + dc, err := pca.base.Token.DeviceCode(ctx, authParams) + if err != nil { + return DeviceCode{}, err + } + + return DeviceCode{Result: dc.Result, authParams: authParams, client: pca, dc: dc}, nil +} + +// acquireTokenByAuthCodeOptions contains the optional parameters used to acquire an access token using the authorization code flow. +type acquireTokenByAuthCodeOptions struct { + challenge, claims, tenantID string +} + +// AcquireByAuthCodeOption is implemented by options for AcquireTokenByAuthCode +type AcquireByAuthCodeOption interface { + acquireByAuthCodeOption() +} + +// WithChallenge allows you to provide a code for the .AcquireTokenByAuthCode() call. +func WithChallenge(challenge string) interface { + AcquireByAuthCodeOption + options.CallOption +} { + return struct { + AcquireByAuthCodeOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenByAuthCodeOptions: + t.challenge = challenge + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// AcquireTokenByAuthCode is a request to acquire a security token from the authority, using an authorization code. +// The specified redirect URI must be the same URI that was used when the authorization code was requested. +// +// Options: [WithChallenge], [WithClaims], [WithTenantID] +func (pca Client) AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, opts ...AcquireByAuthCodeOption) (AuthResult, error) { + o := acquireTokenByAuthCodeOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return AuthResult{}, err + } + + params := base.AcquireTokenAuthCodeParameters{ + Scopes: scopes, + Code: code, + Challenge: o.challenge, + Claims: o.claims, + AppType: accesstokens.ATPublic, + RedirectURI: redirectURI, + TenantID: o.tenantID, + } + + return pca.base.AcquireTokenByAuthCode(ctx, params) +} + +// Accounts gets all the accounts in the token cache. +// If there are no accounts in the cache the returned slice is empty. +func (pca Client) Accounts(ctx context.Context) ([]Account, error) { + return pca.base.AllAccounts(ctx) +} + +// RemoveAccount signs the account out and forgets account from token cache. +func (pca Client) RemoveAccount(ctx context.Context, account Account) error { + return pca.base.RemoveAccount(ctx, account) +} + +// interactiveAuthOptions contains the optional parameters used to acquire an access token for interactive auth code flow. +type interactiveAuthOptions struct { + claims, domainHint, loginHint, redirectURI, tenantID string +} + +// AcquireInteractiveOption is implemented by options for AcquireTokenInteractive +type AcquireInteractiveOption interface { + acquireInteractiveOption() +} + +// WithLoginHint pre-populates the login prompt with a username. 
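For example, the interactive flow defined later in this file can combine these hint options; the username and port below are placeholders and interactiveSignIn is a hypothetical helper:

```go
package example

import (
	"context"

	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/public"
)

// interactiveSignIn opens the system browser with the username prompt
// pre-filled and listens on a fixed local port for the redirect.
func interactiveSignIn(ctx context.Context, client public.Client, scopes []string) (public.AuthResult, error) {
	return client.AcquireTokenInteractive(ctx, scopes,
		public.WithLoginHint("user@contoso.com"),        // placeholder username
		public.WithRedirectURI("http://localhost:8080"), // only the port is used
	)
}
```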
+func WithLoginHint(username string) interface { + AcquireInteractiveOption + AuthCodeURLOption + options.CallOption +} { + return struct { + AcquireInteractiveOption + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *authCodeURLOptions: + t.loginHint = username + case *interactiveAuthOptions: + t.loginHint = username + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// WithDomainHint adds the IdP domain as domain_hint query parameter in the auth url. +func WithDomainHint(domain string) interface { + AcquireInteractiveOption + AuthCodeURLOption + options.CallOption +} { + return struct { + AcquireInteractiveOption + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *authCodeURLOptions: + t.domainHint = domain + case *interactiveAuthOptions: + t.domainHint = domain + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// WithRedirectURI sets a port for the local server used in interactive authentication, for +// example http://localhost:port. All URI components other than the port are ignored. +func WithRedirectURI(redirectURI string) interface { + AcquireInteractiveOption + options.CallOption +} { + return struct { + AcquireInteractiveOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *interactiveAuthOptions: + t.redirectURI = redirectURI + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// AcquireTokenInteractive acquires a security token from the authority using the default web browser to select the account. +// https://docs.microsoft.com/en-us/azure/active-directory/develop/msal-authentication-flows#interactive-and-non-interactive-authentication +// +// Options: [WithDomainHint], [WithLoginHint], [WithRedirectURI], [WithTenantID] +func (pca Client) AcquireTokenInteractive(ctx context.Context, scopes []string, opts ...AcquireInteractiveOption) (AuthResult, error) { + o := interactiveAuthOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return AuthResult{}, err + } + // the code verifier is a random 32-byte sequence that's been base-64 encoded without padding. 
+ // it's used to prevent MitM attacks during auth code flow, see https://tools.ietf.org/html/rfc7636 + cv, challenge, err := codeVerifier() + if err != nil { + return AuthResult{}, err + } + var redirectURL *url.URL + if o.redirectURI != "" { + redirectURL, err = url.Parse(o.redirectURI) + if err != nil { + return AuthResult{}, err + } + } + authParams, err := pca.base.AuthParams.WithTenant(o.tenantID) + if err != nil { + return AuthResult{}, err + } + authParams.Scopes = scopes + authParams.AuthorizationType = authority.ATInteractive + authParams.Claims = o.claims + authParams.CodeChallenge = challenge + authParams.CodeChallengeMethod = "S256" + authParams.LoginHint = o.loginHint + authParams.DomainHint = o.domainHint + authParams.State = uuid.New().String() + authParams.Prompt = "select_account" + res, err := pca.browserLogin(ctx, redirectURL, authParams) + if err != nil { + return AuthResult{}, err + } + authParams.Redirecturi = res.redirectURI + + req, err := accesstokens.NewCodeChallengeRequest(authParams, accesstokens.ATPublic, nil, res.authCode, cv) + if err != nil { + return AuthResult{}, err + } + + token, err := pca.base.Token.AuthCode(ctx, req) + if err != nil { + return AuthResult{}, err + } + + return pca.base.AuthResultFromToken(ctx, authParams, token, true) +} + +type interactiveAuthResult struct { + authCode string + redirectURI string +} + +// provides a test hook to simulate opening a browser +var browserOpenURL = func(authURL string) error { + return browser.OpenURL(authURL) +} + +// parses the port number from the provided URL. +// returns 0 if nil or no port is specified. +func parsePort(u *url.URL) (int, error) { + if u == nil { + return 0, nil + } + p := u.Port() + if p == "" { + return 0, nil + } + return strconv.Atoi(p) +} + +// browserLogin launches the system browser for interactive login +func (pca Client) browserLogin(ctx context.Context, redirectURI *url.URL, params authority.AuthParams) (interactiveAuthResult, error) { + // start local redirect server so login can call us back + port, err := parsePort(redirectURI) + if err != nil { + return interactiveAuthResult{}, err + } + srv, err := local.New(params.State, port) + if err != nil { + return interactiveAuthResult{}, err + } + defer srv.Shutdown() + params.Scopes = accesstokens.AppendDefaultScopes(params) + authURL, err := pca.base.AuthCodeURL(ctx, params.ClientID, srv.Addr, params.Scopes, params) + if err != nil { + return interactiveAuthResult{}, err + } + // open browser window so user can select credentials + if err := browserOpenURL(authURL); err != nil { + return interactiveAuthResult{}, err + } + // now wait until the logic calls us back + res := srv.Result(ctx) + if res.Err != nil { + return interactiveAuthResult{}, res.Err + } + return interactiveAuthResult{ + authCode: res.Code, + redirectURI: srv.Addr, + }, nil +} + +// creates a code verifier string along with its SHA256 hash which +// is used as the challenge when requesting an auth code. +// used in interactive auth flow for PKCE. 
+func codeVerifier() (codeVerifier string, challenge string, err error) { + cvBytes := make([]byte, 32) + if _, err = rand.Read(cvBytes); err != nil { + return + } + codeVerifier = base64.RawURLEncoding.EncodeToString(cvBytes) + // for PKCE, create a hash of the code verifier + cvh := sha256.Sum256([]byte(codeVerifier)) + challenge = base64.RawURLEncoding.EncodeToString(cvh[:]) + return +} diff --git a/vendor/github.com/Microsoft/go-winio/.golangci.yml b/vendor/github.com/Microsoft/go-winio/.golangci.yml index af403bb1..7b503d26 100644 --- a/vendor/github.com/Microsoft/go-winio/.golangci.yml +++ b/vendor/github.com/Microsoft/go-winio/.golangci.yml @@ -8,12 +8,8 @@ linters: - containedctx # struct contains a context - dupl # duplicate code - errname # erorrs are named correctly - - goconst # strings that should be constants - - godot # comments end in a period - - misspell - nolintlint # "//nolint" directives are properly explained - revive # golint replacement - - stylecheck # golint replacement, less configurable than revive - unconvert # unnecessary conversions - wastedassign @@ -23,10 +19,7 @@ linters: - exhaustive # check exhaustiveness of enum switch statements - gofmt # files are gofmt'ed - gosec # security - - nestif # deeply nested ifs - nilerr # returns nil even with non-nil error - - prealloc # slices that can be pre-allocated - - structcheck # unused struct fields - unparam # unused function params issues: @@ -42,6 +35,18 @@ issues: text: "^line-length-limit: " source: "^//(go:generate|sys) " + #TODO: remove after upgrading to go1.18 + # ignore comment spacing for nolint and sys directives + - linters: + - revive + text: "^comment-spacings: no space between comment delimiter and comment text" + source: "//(cspell:|nolint:|sys |todo)" + + # not on go 1.18 yet, so no any + - linters: + - revive + text: "^use-any: since GO 1.18 'interface{}' can be replaced by 'any'" + # allow unjustified ignores of error checks in defer statements - linters: - nolintlint @@ -56,6 +61,8 @@ issues: linters-settings: + exhaustive: + default-signifies-exhaustive: true govet: enable-all: true disable: @@ -98,6 +105,8 @@ linters-settings: disabled: true - name: flag-parameter # excessive, and a common idiom we use disabled: true + - name: unhandled-error # warns over common fmt.Print* and io.Close; rely on errcheck instead + disabled: true # general config - name: line-length-limit arguments: @@ -138,7 +147,3 @@ linters-settings: - VPCI - WCOW - WIM - stylecheck: - checks: - - "all" - - "-ST1003" # use revive's var naming diff --git a/vendor/github.com/Microsoft/go-winio/hvsock.go b/vendor/github.com/Microsoft/go-winio/hvsock.go index 52f1c280..c8819165 100644 --- a/vendor/github.com/Microsoft/go-winio/hvsock.go +++ b/vendor/github.com/Microsoft/go-winio/hvsock.go @@ -23,7 +23,7 @@ import ( const afHVSock = 34 // AF_HYPERV // Well known Service and VM IDs -//https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service#vmid-wildcards +// https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service#vmid-wildcards // HvsockGUIDWildcard is the wildcard VmId for accepting connections from all partitions. func HvsockGUIDWildcard() guid.GUID { // 00000000-0000-0000-0000-000000000000 @@ -31,7 +31,7 @@ func HvsockGUIDWildcard() guid.GUID { // 00000000-0000-0000-0000-000000000000 } // HvsockGUIDBroadcast is the wildcard VmId for broadcasting sends to all partitions. 
-func HvsockGUIDBroadcast() guid.GUID { //ffffffff-ffff-ffff-ffff-ffffffffffff +func HvsockGUIDBroadcast() guid.GUID { // ffffffff-ffff-ffff-ffff-ffffffffffff return guid.GUID{ Data1: 0xffffffff, Data2: 0xffff, @@ -246,7 +246,7 @@ func (l *HvsockListener) Accept() (_ net.Conn, err error) { var addrbuf [addrlen * 2]byte var bytes uint32 - err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /*rxdatalen*/, addrlen, addrlen, &bytes, &c.o) + err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /* rxdatalen */, addrlen, addrlen, &bytes, &c.o) if _, err = l.sock.asyncIO(c, nil, bytes, err); err != nil { return nil, l.opErr("accept", os.NewSyscallError("acceptex", err)) } diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go b/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go new file mode 100644 index 00000000..1f653881 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go @@ -0,0 +1,2 @@ +// This package contains Win32 filesystem functionality. +package fs diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go b/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go new file mode 100644 index 00000000..509b3ec6 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go @@ -0,0 +1,202 @@ +//go:build windows + +package fs + +import ( + "golang.org/x/sys/windows" + + "github.com/Microsoft/go-winio/internal/stringbuffer" +) + +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go fs.go + +// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew +//sys CreateFile(name string, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateFileW + +const NullHandle windows.Handle = 0 + +// AccessMask defines standard, specific, and generic rights. +// +// Bitmask: +// 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 +// 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 +// +---------------+---------------+-------------------------------+ +// |G|G|G|G|Resvd|A| StandardRights| SpecificRights | +// |R|W|E|A| |S| | | +// +-+-------------+---------------+-------------------------------+ +// +// GR Generic Read +// GW Generic Write +// GE Generic Exectue +// GA Generic All +// Resvd Reserved +// AS Access Security System +// +// https://learn.microsoft.com/en-us/windows/win32/secauthz/access-mask +// +// https://learn.microsoft.com/en-us/windows/win32/secauthz/generic-access-rights +// +// https://learn.microsoft.com/en-us/windows/win32/fileio/file-access-rights-constants +type AccessMask = windows.ACCESS_MASK + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. +const ( + // Not actually any. 
+ // + // For CreateFile: "query certain metadata such as file, directory, or device attributes without accessing that file or device" + // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew#parameters + FILE_ANY_ACCESS AccessMask = 0 + + // Specific Object Access + // from ntioapi.h + + FILE_READ_DATA AccessMask = (0x0001) // file & pipe + FILE_LIST_DIRECTORY AccessMask = (0x0001) // directory + + FILE_WRITE_DATA AccessMask = (0x0002) // file & pipe + FILE_ADD_FILE AccessMask = (0x0002) // directory + + FILE_APPEND_DATA AccessMask = (0x0004) // file + FILE_ADD_SUBDIRECTORY AccessMask = (0x0004) // directory + FILE_CREATE_PIPE_INSTANCE AccessMask = (0x0004) // named pipe + + FILE_READ_EA AccessMask = (0x0008) // file & directory + FILE_READ_PROPERTIES AccessMask = FILE_READ_EA + + FILE_WRITE_EA AccessMask = (0x0010) // file & directory + FILE_WRITE_PROPERTIES AccessMask = FILE_WRITE_EA + + FILE_EXECUTE AccessMask = (0x0020) // file + FILE_TRAVERSE AccessMask = (0x0020) // directory + + FILE_DELETE_CHILD AccessMask = (0x0040) // directory + + FILE_READ_ATTRIBUTES AccessMask = (0x0080) // all + + FILE_WRITE_ATTRIBUTES AccessMask = (0x0100) // all + + FILE_ALL_ACCESS AccessMask = (STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x1FF) + FILE_GENERIC_READ AccessMask = (STANDARD_RIGHTS_READ | FILE_READ_DATA | FILE_READ_ATTRIBUTES | FILE_READ_EA | SYNCHRONIZE) + FILE_GENERIC_WRITE AccessMask = (STANDARD_RIGHTS_WRITE | FILE_WRITE_DATA | FILE_WRITE_ATTRIBUTES | FILE_WRITE_EA | FILE_APPEND_DATA | SYNCHRONIZE) + FILE_GENERIC_EXECUTE AccessMask = (STANDARD_RIGHTS_EXECUTE | FILE_READ_ATTRIBUTES | FILE_EXECUTE | SYNCHRONIZE) + + SPECIFIC_RIGHTS_ALL AccessMask = 0x0000FFFF + + // Standard Access + // from ntseapi.h + + DELETE AccessMask = 0x0001_0000 + READ_CONTROL AccessMask = 0x0002_0000 + WRITE_DAC AccessMask = 0x0004_0000 + WRITE_OWNER AccessMask = 0x0008_0000 + SYNCHRONIZE AccessMask = 0x0010_0000 + + STANDARD_RIGHTS_REQUIRED AccessMask = 0x000F_0000 + + STANDARD_RIGHTS_READ AccessMask = READ_CONTROL + STANDARD_RIGHTS_WRITE AccessMask = READ_CONTROL + STANDARD_RIGHTS_EXECUTE AccessMask = READ_CONTROL + + STANDARD_RIGHTS_ALL AccessMask = 0x001F_0000 +) + +type FileShareMode uint32 + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. +const ( + FILE_SHARE_NONE FileShareMode = 0x00 + FILE_SHARE_READ FileShareMode = 0x01 + FILE_SHARE_WRITE FileShareMode = 0x02 + FILE_SHARE_DELETE FileShareMode = 0x04 + FILE_SHARE_VALID_FLAGS FileShareMode = 0x07 +) + +type FileCreationDisposition uint32 + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. +const ( + // from winbase.h + + CREATE_NEW FileCreationDisposition = 0x01 + CREATE_ALWAYS FileCreationDisposition = 0x02 + OPEN_EXISTING FileCreationDisposition = 0x03 + OPEN_ALWAYS FileCreationDisposition = 0x04 + TRUNCATE_EXISTING FileCreationDisposition = 0x05 +) + +// CreateFile and co. take flags or attributes together as one parameter. +// Define alias until we can use generics to allow both + +// https://learn.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants +type FileFlagOrAttribute uint32 + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. 
+const ( // from winnt.h + FILE_FLAG_WRITE_THROUGH FileFlagOrAttribute = 0x8000_0000 + FILE_FLAG_OVERLAPPED FileFlagOrAttribute = 0x4000_0000 + FILE_FLAG_NO_BUFFERING FileFlagOrAttribute = 0x2000_0000 + FILE_FLAG_RANDOM_ACCESS FileFlagOrAttribute = 0x1000_0000 + FILE_FLAG_SEQUENTIAL_SCAN FileFlagOrAttribute = 0x0800_0000 + FILE_FLAG_DELETE_ON_CLOSE FileFlagOrAttribute = 0x0400_0000 + FILE_FLAG_BACKUP_SEMANTICS FileFlagOrAttribute = 0x0200_0000 + FILE_FLAG_POSIX_SEMANTICS FileFlagOrAttribute = 0x0100_0000 + FILE_FLAG_OPEN_REPARSE_POINT FileFlagOrAttribute = 0x0020_0000 + FILE_FLAG_OPEN_NO_RECALL FileFlagOrAttribute = 0x0010_0000 + FILE_FLAG_FIRST_PIPE_INSTANCE FileFlagOrAttribute = 0x0008_0000 +) + +type FileSQSFlag = FileFlagOrAttribute + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. +const ( // from winbase.h + SECURITY_ANONYMOUS FileSQSFlag = FileSQSFlag(SecurityAnonymous << 16) + SECURITY_IDENTIFICATION FileSQSFlag = FileSQSFlag(SecurityIdentification << 16) + SECURITY_IMPERSONATION FileSQSFlag = FileSQSFlag(SecurityImpersonation << 16) + SECURITY_DELEGATION FileSQSFlag = FileSQSFlag(SecurityDelegation << 16) + + SECURITY_SQOS_PRESENT FileSQSFlag = 0x00100000 + SECURITY_VALID_SQOS_FLAGS FileSQSFlag = 0x001F0000 +) + +// GetFinalPathNameByHandle flags +// +// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlew#parameters +type GetFinalPathFlag uint32 + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. +const ( + GetFinalPathDefaultFlag GetFinalPathFlag = 0x0 + + FILE_NAME_NORMALIZED GetFinalPathFlag = 0x0 + FILE_NAME_OPENED GetFinalPathFlag = 0x8 + + VOLUME_NAME_DOS GetFinalPathFlag = 0x0 + VOLUME_NAME_GUID GetFinalPathFlag = 0x1 + VOLUME_NAME_NT GetFinalPathFlag = 0x2 + VOLUME_NAME_NONE GetFinalPathFlag = 0x4 +) + +// getFinalPathNameByHandle facilitates calling the Windows API GetFinalPathNameByHandle +// with the given handle and flags. It transparently takes care of creating a buffer of the +// correct size for the call. +// +// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlew +func GetFinalPathNameByHandle(h windows.Handle, flags GetFinalPathFlag) (string, error) { + b := stringbuffer.NewWString() + //TODO: can loop infinitely if Win32 keeps returning the same (or a larger) n? + for { + n, err := windows.GetFinalPathNameByHandle(h, b.Pointer(), b.Cap(), uint32(flags)) + if err != nil { + return "", err + } + // If the buffer wasn't large enough, n will be the total size needed (including null terminator). + // Resize and try again. + if n > b.Cap() { + b.ResizeTo(n) + continue + } + // If the buffer is large enough, n will be the size not including the null terminator. + // Convert to a Go string and return. 
+ return b.String(), nil + } +} diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/security.go b/vendor/github.com/Microsoft/go-winio/internal/fs/security.go new file mode 100644 index 00000000..81760ac6 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/fs/security.go @@ -0,0 +1,12 @@ +package fs + +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ne-winnt-security_impersonation_level +type SecurityImpersonationLevel int32 // C default enums underlying type is `int`, which is Go `int32` + +// Impersonation levels +const ( + SecurityAnonymous SecurityImpersonationLevel = 0 + SecurityIdentification SecurityImpersonationLevel = 1 + SecurityImpersonation SecurityImpersonationLevel = 2 + SecurityDelegation SecurityImpersonationLevel = 3 +) diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go new file mode 100644 index 00000000..e2f7bb24 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go @@ -0,0 +1,64 @@ +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. + +package fs + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) 
+ return e +} + +var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procCreateFileW = modkernel32.NewProc("CreateFileW") +) + +func CreateFile(name string, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _CreateFile(_p0, access, mode, sa, createmode, attrs, templatefile) +} + +func _CreateFile(name *uint16, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) + handle = windows.Handle(r0) + if handle == windows.InvalidHandle { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go b/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go index 39e8c05f..aeb7b725 100644 --- a/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go +++ b/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go @@ -100,8 +100,8 @@ func (f *runtimeFunc) Load() error { (*byte)(unsafe.Pointer(&f.addr)), uint32(unsafe.Sizeof(f.addr)), &n, - nil, //overlapped - 0, //completionRoutine + nil, // overlapped + 0, // completionRoutine ) }) return f.err diff --git a/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go b/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go new file mode 100644 index 00000000..7ad50570 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go @@ -0,0 +1,132 @@ +package stringbuffer + +import ( + "sync" + "unicode/utf16" +) + +// TODO: worth exporting and using in mkwinsyscall? + +// Uint16BufferSize is the buffer size in the pool, chosen somewhat arbitrarily to accommodate +// large path strings: +// MAX_PATH (260) + size of volume GUID prefix (49) + null terminator = 310. +const MinWStringCap = 310 + +// use *[]uint16 since []uint16 creates an extra allocation where the slice header +// is copied to heap and then referenced via pointer in the interface header that sync.Pool +// stores. +var pathPool = sync.Pool{ // if go1.18+ adds Pool[T], use that to store []uint16 directly + New: func() interface{} { + b := make([]uint16, MinWStringCap) + return &b + }, +} + +func newBuffer() []uint16 { return *(pathPool.Get().(*[]uint16)) } + +// freeBuffer copies the slice header data, and puts a pointer to that in the pool. +// This avoids taking a pointer to the slice header in WString, which can be set to nil. +func freeBuffer(b []uint16) { pathPool.Put(&b) } + +// WString is a wide string buffer ([]uint16) meant for storing UTF-16 encoded strings +// for interacting with Win32 APIs. +// Sizes are specified as uint32 and not int. +// +// It is not thread safe. +type WString struct { + // type-def allows casting to []uint16 directly, use struct to prevent that and allow adding fields in the future. + + // raw buffer + b []uint16 +} + +// NewWString returns a [WString] allocated from a shared pool with an +// initial capacity of at least [MinWStringCap]. 
+// Since the buffer may have been previously used, its contents are not guaranteed to be empty.
+//
+// The buffer should be freed via [WString.Free]
+func NewWString() *WString {
+	return &WString{
+		b: newBuffer(),
+	}
+}
+
+func (b *WString) Free() {
+	if b.empty() {
+		return
+	}
+	freeBuffer(b.b)
+	b.b = nil
+}
+
+// ResizeTo grows the buffer to at least c and returns the new capacity, freeing the
+// previous buffer back into the pool.
+func (b *WString) ResizeTo(c uint32) uint32 {
+	// already sufficient (or n is 0)
+	if c <= b.Cap() {
+		return b.Cap()
+	}
+
+	if c <= MinWStringCap {
+		c = MinWStringCap
+	}
+	// allocate at-least double buffer size, as is done in [bytes.Buffer] and other places
+	if c <= 2*b.Cap() {
+		c = 2 * b.Cap()
+	}
+
+	b2 := make([]uint16, c)
+	if !b.empty() {
+		copy(b2, b.b)
+		freeBuffer(b.b)
+	}
+	b.b = b2
+	return c
+}
+
+// Buffer returns the underlying []uint16 buffer.
+func (b *WString) Buffer() []uint16 {
+	if b.empty() {
+		return nil
+	}
+	return b.b
+}
+
+// Pointer returns a pointer to the first uint16 in the buffer.
+// If the [WString.Free] has already been called, the pointer will be nil.
+func (b *WString) Pointer() *uint16 {
+	if b.empty() {
+		return nil
+	}
+	return &b.b[0]
+}
+
+// String returns the UTF-8 encoding of the UTF-16 string in the buffer.
+//
+// It assumes that the data is null-terminated.
+func (b *WString) String() string {
+	// Using [windows.UTF16ToString] would require importing "golang.org/x/sys/windows"
+	// and would make this code Windows-only, which makes no sense.
+	// So copy UTF16ToString code into here.
+	// If other windows-specific code is added, switch to [windows.UTF16ToString]
+
+	s := b.b
+	for i, v := range s {
+		if v == 0 {
+			s = s[:i]
+			break
+		}
+	}
+	return string(utf16.Decode(s))
+}
+
+// Cap returns the underlying buffer capacity.
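The intended life cycle of a WString mirrors the GetFinalPathNameByHandle helper earlier in this patch: allocate from the pool, pass Pointer and Cap to a Win32 call, grow with ResizeTo when the reported size exceeds the capacity, then convert and free. A sketch, assuming a caller inside the go-winio module (the package is internal) and a hypothetical fill callback standing in for the Win32 call:

```go
package example // must live inside the go-winio module; stringbuffer is internal

import "github.com/Microsoft/go-winio/internal/stringbuffer"

// utf16Scratch runs fill against a pooled buffer, growing it until the
// reported size fits, and returns the UTF-8 conversion of the result.
func utf16Scratch(fill func(buf *uint16, capacity uint32) uint32) string {
	b := stringbuffer.NewWString()
	defer b.Free()
	for {
		n := fill(b.Pointer(), b.Cap())
		if n > b.Cap() {
			// The callee reported it needs a larger buffer; grow and retry.
			b.ResizeTo(n)
			continue
		}
		return b.String()
	}
}
```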
+func (b *WString) Cap() uint32 { + if b.empty() { + return 0 + } + return b.cap() +} + +func (b *WString) cap() uint32 { return uint32(cap(b.b)) } +func (b *WString) empty() bool { return b == nil || b.cap() == 0 } diff --git a/vendor/github.com/Microsoft/go-winio/pipe.go b/vendor/github.com/Microsoft/go-winio/pipe.go index ca6e38fc..25cc8110 100644 --- a/vendor/github.com/Microsoft/go-winio/pipe.go +++ b/vendor/github.com/Microsoft/go-winio/pipe.go @@ -16,11 +16,12 @@ import ( "unsafe" "golang.org/x/sys/windows" + + "github.com/Microsoft/go-winio/internal/fs" ) //sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe //sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW -//sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW //sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo //sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc @@ -163,19 +164,21 @@ func (s pipeAddress) String() string { } // tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout. -func tryDialPipe(ctx context.Context, path *string, access uint32) (syscall.Handle, error) { +func tryDialPipe(ctx context.Context, path *string, access fs.AccessMask) (syscall.Handle, error) { for { select { case <-ctx.Done(): return syscall.Handle(0), ctx.Err() default: - h, err := createFile(*path, + wh, err := fs.CreateFile(*path, access, - 0, - nil, - syscall.OPEN_EXISTING, - windows.FILE_FLAG_OVERLAPPED|windows.SECURITY_SQOS_PRESENT|windows.SECURITY_ANONYMOUS, - 0) + 0, // mode + nil, // security attributes + fs.OPEN_EXISTING, + fs.FILE_FLAG_OVERLAPPED|fs.SECURITY_SQOS_PRESENT|fs.SECURITY_ANONYMOUS, + 0, // template file handle + ) + h := syscall.Handle(wh) if err == nil { return h, nil } @@ -219,7 +222,7 @@ func DialPipeContext(ctx context.Context, path string) (net.Conn, error) { func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) { var err error var h syscall.Handle - h, err = tryDialPipe(ctx, &path, access) + h, err = tryDialPipe(ctx, &path, fs.AccessMask(access)) if err != nil { return nil, err } @@ -279,6 +282,7 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy } defer localFree(ntPath.Buffer) oa.ObjectName = &ntPath + oa.Attributes = windows.OBJ_CASE_INSENSITIVE // The security descriptor is only needed for the first pipe. 
if first { diff --git a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go index 83f45a13..469b16f6 100644 --- a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go @@ -63,7 +63,6 @@ var ( procBackupWrite = modkernel32.NewProc("BackupWrite") procCancelIoEx = modkernel32.NewProc("CancelIoEx") procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") - procCreateFileW = modkernel32.NewProc("CreateFileW") procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") @@ -305,24 +304,6 @@ func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) { return } -func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(name) - if err != nil { - return - } - return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile) -} - -func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - err = errnoErr(e1) - } - return -} - func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) { r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0) newport = syscall.Handle(r0) diff --git a/vendor/github.com/armon/go-metrics/.gitignore b/vendor/github.com/armon/go-metrics/.gitignore deleted file mode 100644 index e5750f57..00000000 --- a/vendor/github.com/armon/go-metrics/.gitignore +++ /dev/null @@ -1,26 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe - -/metrics.out - -.idea diff --git a/vendor/github.com/armon/go-metrics/.travis.yml b/vendor/github.com/armon/go-metrics/.travis.yml deleted file mode 100644 index 87d230c8..00000000 --- a/vendor/github.com/armon/go-metrics/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go - -go: - - "1.x" - -env: - - GO111MODULE=on - -install: - - go get ./... - -script: - - go test ./... 
diff --git a/vendor/github.com/armon/go-metrics/LICENSE b/vendor/github.com/armon/go-metrics/LICENSE deleted file mode 100644 index 106569e5..00000000 --- a/vendor/github.com/armon/go-metrics/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Armon Dadgar - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/armon/go-metrics/README.md b/vendor/github.com/armon/go-metrics/README.md deleted file mode 100644 index aa73348c..00000000 --- a/vendor/github.com/armon/go-metrics/README.md +++ /dev/null @@ -1,91 +0,0 @@ -go-metrics -========== - -This library provides a `metrics` package which can be used to instrument code, -expose application metrics, and profile runtime performance in a flexible manner. - -Current API: [![GoDoc](https://godoc.org/github.com/armon/go-metrics?status.svg)](https://godoc.org/github.com/armon/go-metrics) - -Sinks ------ - -The `metrics` package makes use of a `MetricSink` interface to support delivery -to any type of backend. Currently the following sinks are provided: - -* StatsiteSink : Sinks to a [statsite](https://github.com/armon/statsite/) instance (TCP) -* StatsdSink: Sinks to a [StatsD](https://github.com/etsy/statsd/) / statsite instance (UDP) -* PrometheusSink: Sinks to a [Prometheus](http://prometheus.io/) metrics endpoint (exposed via HTTP for scrapes) -* InmemSink : Provides in-memory aggregation, can be used to export stats -* FanoutSink : Sinks to multiple sinks. Enables writing to multiple statsite instances for example. -* BlackholeSink : Sinks to nowhere - -In addition to the sinks, the `InmemSignal` can be used to catch a signal, -and dump a formatted output of recent metrics. For example, when a process gets -a SIGUSR1, it can dump to stderr recent performance metrics for debugging. - -Labels ------- - -Most metrics do have an equivalent ending with `WithLabels`, such methods -allow to push metrics with labels and use some features of underlying Sinks -(ex: translated into Prometheus labels). - -Since some of these labels may increase greatly cardinality of metrics, the -library allow to filter labels using a blacklist/whitelist filtering system -which is global to all metrics. - -* If `Config.AllowedLabels` is not nil, then only labels specified in this value will be sent to underlying Sink, otherwise, all labels are sent by default. -* If `Config.BlockedLabels` is not nil, any label specified in this value will not be sent to underlying Sinks. 
- -By default, both `Config.AllowedLabels` and `Config.BlockedLabels` are nil, meaning that -no tags are filetered at all, but it allow to a user to globally block some tags with high -cardinality at application level. - -Examples --------- - -Here is an example of using the package: - -```go -func SlowMethod() { - // Profiling the runtime of a method - defer metrics.MeasureSince([]string{"SlowMethod"}, time.Now()) -} - -// Configure a statsite sink as the global metrics sink -sink, _ := metrics.NewStatsiteSink("statsite:8125") -metrics.NewGlobal(metrics.DefaultConfig("service-name"), sink) - -// Emit a Key/Value pair -metrics.EmitKey([]string{"questions", "meaning of life"}, 42) -``` - -Here is an example of setting up a signal handler: - -```go -// Setup the inmem sink and signal handler -inm := metrics.NewInmemSink(10*time.Second, time.Minute) -sig := metrics.DefaultInmemSignal(inm) -metrics.NewGlobal(metrics.DefaultConfig("service-name"), inm) - -// Run some code -inm.SetGauge([]string{"foo"}, 42) -inm.EmitKey([]string{"bar"}, 30) - -inm.IncrCounter([]string{"baz"}, 42) -inm.IncrCounter([]string{"baz"}, 1) -inm.IncrCounter([]string{"baz"}, 80) - -inm.AddSample([]string{"method", "wow"}, 42) -inm.AddSample([]string{"method", "wow"}, 100) -inm.AddSample([]string{"method", "wow"}, 22) - -.... -``` - -When a signal comes in, output like the following will be dumped to stderr: - - [2014-01-28 14:57:33.04 -0800 PST][G] 'foo': 42.000 - [2014-01-28 14:57:33.04 -0800 PST][P] 'bar': 30.000 - [2014-01-28 14:57:33.04 -0800 PST][C] 'baz': Count: 3 Min: 1.000 Mean: 41.000 Max: 80.000 Stddev: 39.509 - [2014-01-28 14:57:33.04 -0800 PST][S] 'method.wow': Count: 3 Min: 22.000 Mean: 54.667 Max: 100.000 Stddev: 40.513 \ No newline at end of file diff --git a/vendor/github.com/armon/go-metrics/const_unix.go b/vendor/github.com/armon/go-metrics/const_unix.go deleted file mode 100644 index 31098dd5..00000000 --- a/vendor/github.com/armon/go-metrics/const_unix.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !windows - -package metrics - -import ( - "syscall" -) - -const ( - // DefaultSignal is used with DefaultInmemSignal - DefaultSignal = syscall.SIGUSR1 -) diff --git a/vendor/github.com/armon/go-metrics/const_windows.go b/vendor/github.com/armon/go-metrics/const_windows.go deleted file mode 100644 index 38136af3..00000000 --- a/vendor/github.com/armon/go-metrics/const_windows.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build windows - -package metrics - -import ( - "syscall" -) - -const ( - // DefaultSignal is used with DefaultInmemSignal - // Windows has no SIGUSR1, use SIGBREAK - DefaultSignal = syscall.Signal(21) -) diff --git a/vendor/github.com/armon/go-metrics/inmem.go b/vendor/github.com/armon/go-metrics/inmem.go deleted file mode 100644 index 7c427aca..00000000 --- a/vendor/github.com/armon/go-metrics/inmem.go +++ /dev/null @@ -1,339 +0,0 @@ -package metrics - -import ( - "bytes" - "fmt" - "math" - "net/url" - "strings" - "sync" - "time" -) - -var spaceReplacer = strings.NewReplacer(" ", "_") - -// InmemSink provides a MetricSink that does in-memory aggregation -// without sending metrics over a network. It can be embedded within -// an application to provide profiling information. -type InmemSink struct { - // How long is each aggregation interval - interval time.Duration - - // Retain controls how many metrics interval we keep - retain time.Duration - - // maxIntervals is the maximum length of intervals. - // It is retain / interval. 
- maxIntervals int - - // intervals is a slice of the retained intervals - intervals []*IntervalMetrics - intervalLock sync.RWMutex - - rateDenom float64 -} - -// IntervalMetrics stores the aggregated metrics -// for a specific interval -type IntervalMetrics struct { - sync.RWMutex - - // The start time of the interval - Interval time.Time - - // Gauges maps the key to the last set value - Gauges map[string]GaugeValue - - // Points maps the string to the list of emitted values - // from EmitKey - Points map[string][]float32 - - // Counters maps the string key to a sum of the counter - // values - Counters map[string]SampledValue - - // Samples maps the key to an AggregateSample, - // which has the rolled up view of a sample - Samples map[string]SampledValue - - // done is closed when this interval has ended, and a new IntervalMetrics - // has been created to receive any future metrics. - done chan struct{} -} - -// NewIntervalMetrics creates a new IntervalMetrics for a given interval -func NewIntervalMetrics(intv time.Time) *IntervalMetrics { - return &IntervalMetrics{ - Interval: intv, - Gauges: make(map[string]GaugeValue), - Points: make(map[string][]float32), - Counters: make(map[string]SampledValue), - Samples: make(map[string]SampledValue), - done: make(chan struct{}), - } -} - -// AggregateSample is used to hold aggregate metrics -// about a sample -type AggregateSample struct { - Count int // The count of emitted pairs - Rate float64 // The values rate per time unit (usually 1 second) - Sum float64 // The sum of values - SumSq float64 `json:"-"` // The sum of squared values - Min float64 // Minimum value - Max float64 // Maximum value - LastUpdated time.Time `json:"-"` // When value was last updated -} - -// Computes a Stddev of the values -func (a *AggregateSample) Stddev() float64 { - num := (float64(a.Count) * a.SumSq) - math.Pow(a.Sum, 2) - div := float64(a.Count * (a.Count - 1)) - if div == 0 { - return 0 - } - return math.Sqrt(num / div) -} - -// Computes a mean of the values -func (a *AggregateSample) Mean() float64 { - if a.Count == 0 { - return 0 - } - return a.Sum / float64(a.Count) -} - -// Ingest is used to update a sample -func (a *AggregateSample) Ingest(v float64, rateDenom float64) { - a.Count++ - a.Sum += v - a.SumSq += (v * v) - if v < a.Min || a.Count == 1 { - a.Min = v - } - if v > a.Max || a.Count == 1 { - a.Max = v - } - a.Rate = float64(a.Sum) / rateDenom - a.LastUpdated = time.Now() -} - -func (a *AggregateSample) String() string { - if a.Count == 0 { - return "Count: 0" - } else if a.Stddev() == 0 { - return fmt.Sprintf("Count: %d Sum: %0.3f LastUpdated: %s", a.Count, a.Sum, a.LastUpdated) - } else { - return fmt.Sprintf("Count: %d Min: %0.3f Mean: %0.3f Max: %0.3f Stddev: %0.3f Sum: %0.3f LastUpdated: %s", - a.Count, a.Min, a.Mean(), a.Max, a.Stddev(), a.Sum, a.LastUpdated) - } -} - -// NewInmemSinkFromURL creates an InmemSink from a URL. It is used -// (and tested) from NewMetricSinkFromURL. -func NewInmemSinkFromURL(u *url.URL) (MetricSink, error) { - params := u.Query() - - interval, err := time.ParseDuration(params.Get("interval")) - if err != nil { - return nil, fmt.Errorf("Bad 'interval' param: %s", err) - } - - retain, err := time.ParseDuration(params.Get("retain")) - if err != nil { - return nil, fmt.Errorf("Bad 'retain' param: %s", err) - } - - return NewInmemSink(interval, retain), nil -} - -// NewInmemSink is used to construct a new in-memory sink. -// Uses an aggregation interval and maximum retention period. 
-func NewInmemSink(interval, retain time.Duration) *InmemSink { - rateTimeUnit := time.Second - i := &InmemSink{ - interval: interval, - retain: retain, - maxIntervals: int(retain / interval), - rateDenom: float64(interval.Nanoseconds()) / float64(rateTimeUnit.Nanoseconds()), - } - i.intervals = make([]*IntervalMetrics, 0, i.maxIntervals) - return i -} - -func (i *InmemSink) SetGauge(key []string, val float32) { - i.SetGaugeWithLabels(key, val, nil) -} - -func (i *InmemSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { - k, name := i.flattenKeyLabels(key, labels) - intv := i.getInterval() - - intv.Lock() - defer intv.Unlock() - intv.Gauges[k] = GaugeValue{Name: name, Value: val, Labels: labels} -} - -func (i *InmemSink) EmitKey(key []string, val float32) { - k := i.flattenKey(key) - intv := i.getInterval() - - intv.Lock() - defer intv.Unlock() - vals := intv.Points[k] - intv.Points[k] = append(vals, val) -} - -func (i *InmemSink) IncrCounter(key []string, val float32) { - i.IncrCounterWithLabels(key, val, nil) -} - -func (i *InmemSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { - k, name := i.flattenKeyLabels(key, labels) - intv := i.getInterval() - - intv.Lock() - defer intv.Unlock() - - agg, ok := intv.Counters[k] - if !ok { - agg = SampledValue{ - Name: name, - AggregateSample: &AggregateSample{}, - Labels: labels, - } - intv.Counters[k] = agg - } - agg.Ingest(float64(val), i.rateDenom) -} - -func (i *InmemSink) AddSample(key []string, val float32) { - i.AddSampleWithLabels(key, val, nil) -} - -func (i *InmemSink) AddSampleWithLabels(key []string, val float32, labels []Label) { - k, name := i.flattenKeyLabels(key, labels) - intv := i.getInterval() - - intv.Lock() - defer intv.Unlock() - - agg, ok := intv.Samples[k] - if !ok { - agg = SampledValue{ - Name: name, - AggregateSample: &AggregateSample{}, - Labels: labels, - } - intv.Samples[k] = agg - } - agg.Ingest(float64(val), i.rateDenom) -} - -// Data is used to retrieve all the aggregated metrics -// Intervals may be in use, and a read lock should be acquired -func (i *InmemSink) Data() []*IntervalMetrics { - // Get the current interval, forces creation - i.getInterval() - - i.intervalLock.RLock() - defer i.intervalLock.RUnlock() - - n := len(i.intervals) - intervals := make([]*IntervalMetrics, n) - - copy(intervals[:n-1], i.intervals[:n-1]) - current := i.intervals[n-1] - - // make its own copy for current interval - intervals[n-1] = &IntervalMetrics{} - copyCurrent := intervals[n-1] - current.RLock() - *copyCurrent = *current - // RWMutex is not safe to copy, so create a new instance on the copy - copyCurrent.RWMutex = sync.RWMutex{} - - copyCurrent.Gauges = make(map[string]GaugeValue, len(current.Gauges)) - for k, v := range current.Gauges { - copyCurrent.Gauges[k] = v - } - // saved values will be not change, just copy its link - copyCurrent.Points = make(map[string][]float32, len(current.Points)) - for k, v := range current.Points { - copyCurrent.Points[k] = v - } - copyCurrent.Counters = make(map[string]SampledValue, len(current.Counters)) - for k, v := range current.Counters { - copyCurrent.Counters[k] = v.deepCopy() - } - copyCurrent.Samples = make(map[string]SampledValue, len(current.Samples)) - for k, v := range current.Samples { - copyCurrent.Samples[k] = v.deepCopy() - } - current.RUnlock() - - return intervals -} - -// getInterval returns the current interval. 
A new interval is created if no -// previous interval exists, or if the current time is beyond the window for the -// current interval. -func (i *InmemSink) getInterval() *IntervalMetrics { - intv := time.Now().Truncate(i.interval) - - // Attempt to return the existing interval first, because it only requires - // a read lock. - i.intervalLock.RLock() - n := len(i.intervals) - if n > 0 && i.intervals[n-1].Interval == intv { - defer i.intervalLock.RUnlock() - return i.intervals[n-1] - } - i.intervalLock.RUnlock() - - i.intervalLock.Lock() - defer i.intervalLock.Unlock() - - // Re-check for an existing interval now that the lock is re-acquired. - n = len(i.intervals) - if n > 0 && i.intervals[n-1].Interval == intv { - return i.intervals[n-1] - } - - current := NewIntervalMetrics(intv) - i.intervals = append(i.intervals, current) - if n > 0 { - close(i.intervals[n-1].done) - } - - n++ - // Prune old intervals if the count exceeds the max. - if n >= i.maxIntervals { - copy(i.intervals[0:], i.intervals[n-i.maxIntervals:]) - i.intervals = i.intervals[:i.maxIntervals] - } - return current -} - -// Flattens the key for formatting, removes spaces -func (i *InmemSink) flattenKey(parts []string) string { - buf := &bytes.Buffer{} - - joined := strings.Join(parts, ".") - - spaceReplacer.WriteString(buf, joined) - - return buf.String() -} - -// Flattens the key for formatting along with its labels, removes spaces -func (i *InmemSink) flattenKeyLabels(parts []string, labels []Label) (string, string) { - key := i.flattenKey(parts) - buf := bytes.NewBufferString(key) - - for _, label := range labels { - spaceReplacer.WriteString(buf, fmt.Sprintf(";%s=%s", label.Name, label.Value)) - } - - return buf.String(), key -} diff --git a/vendor/github.com/armon/go-metrics/inmem_endpoint.go b/vendor/github.com/armon/go-metrics/inmem_endpoint.go deleted file mode 100644 index 24eefa96..00000000 --- a/vendor/github.com/armon/go-metrics/inmem_endpoint.go +++ /dev/null @@ -1,162 +0,0 @@ -package metrics - -import ( - "context" - "fmt" - "net/http" - "sort" - "time" -) - -// MetricsSummary holds a roll-up of metrics info for a given interval -type MetricsSummary struct { - Timestamp string - Gauges []GaugeValue - Points []PointValue - Counters []SampledValue - Samples []SampledValue -} - -type GaugeValue struct { - Name string - Hash string `json:"-"` - Value float32 - - Labels []Label `json:"-"` - DisplayLabels map[string]string `json:"Labels"` -} - -type PointValue struct { - Name string - Points []float32 -} - -type SampledValue struct { - Name string - Hash string `json:"-"` - *AggregateSample - Mean float64 - Stddev float64 - - Labels []Label `json:"-"` - DisplayLabels map[string]string `json:"Labels"` -} - -// deepCopy allocates a new instance of AggregateSample -func (source *SampledValue) deepCopy() SampledValue { - dest := *source - if source.AggregateSample != nil { - dest.AggregateSample = &AggregateSample{} - *dest.AggregateSample = *source.AggregateSample - } - return dest -} - -// DisplayMetrics returns a summary of the metrics from the most recent finished interval. 
-func (i *InmemSink) DisplayMetrics(resp http.ResponseWriter, req *http.Request) (interface{}, error) { - data := i.Data() - - var interval *IntervalMetrics - n := len(data) - switch { - case n == 0: - return nil, fmt.Errorf("no metric intervals have been initialized yet") - case n == 1: - // Show the current interval if it's all we have - interval = data[0] - default: - // Show the most recent finished interval if we have one - interval = data[n-2] - } - - return newMetricSummaryFromInterval(interval), nil -} - -func newMetricSummaryFromInterval(interval *IntervalMetrics) MetricsSummary { - interval.RLock() - defer interval.RUnlock() - - summary := MetricsSummary{ - Timestamp: interval.Interval.Round(time.Second).UTC().String(), - Gauges: make([]GaugeValue, 0, len(interval.Gauges)), - Points: make([]PointValue, 0, len(interval.Points)), - } - - // Format and sort the output of each metric type, so it gets displayed in a - // deterministic order. - for name, points := range interval.Points { - summary.Points = append(summary.Points, PointValue{name, points}) - } - sort.Slice(summary.Points, func(i, j int) bool { - return summary.Points[i].Name < summary.Points[j].Name - }) - - for hash, value := range interval.Gauges { - value.Hash = hash - value.DisplayLabels = make(map[string]string) - for _, label := range value.Labels { - value.DisplayLabels[label.Name] = label.Value - } - value.Labels = nil - - summary.Gauges = append(summary.Gauges, value) - } - sort.Slice(summary.Gauges, func(i, j int) bool { - return summary.Gauges[i].Hash < summary.Gauges[j].Hash - }) - - summary.Counters = formatSamples(interval.Counters) - summary.Samples = formatSamples(interval.Samples) - - return summary -} - -func formatSamples(source map[string]SampledValue) []SampledValue { - output := make([]SampledValue, 0, len(source)) - for hash, sample := range source { - displayLabels := make(map[string]string) - for _, label := range sample.Labels { - displayLabels[label.Name] = label.Value - } - - output = append(output, SampledValue{ - Name: sample.Name, - Hash: hash, - AggregateSample: sample.AggregateSample, - Mean: sample.AggregateSample.Mean(), - Stddev: sample.AggregateSample.Stddev(), - DisplayLabels: displayLabels, - }) - } - sort.Slice(output, func(i, j int) bool { - return output[i].Hash < output[j].Hash - }) - - return output -} - -type Encoder interface { - Encode(interface{}) error -} - -// Stream writes metrics using encoder.Encode each time an interval ends. Runs -// until the request context is cancelled, or the encoder returns an error. -// The caller is responsible for logging any errors from encoder. 
-func (i *InmemSink) Stream(ctx context.Context, encoder Encoder) { - interval := i.getInterval() - - for { - select { - case <-interval.done: - summary := newMetricSummaryFromInterval(interval) - if err := encoder.Encode(summary); err != nil { - return - } - - // update interval to the next one - interval = i.getInterval() - case <-ctx.Done(): - return - } - } -} diff --git a/vendor/github.com/armon/go-metrics/inmem_signal.go b/vendor/github.com/armon/go-metrics/inmem_signal.go deleted file mode 100644 index 0937f4ae..00000000 --- a/vendor/github.com/armon/go-metrics/inmem_signal.go +++ /dev/null @@ -1,117 +0,0 @@ -package metrics - -import ( - "bytes" - "fmt" - "io" - "os" - "os/signal" - "strings" - "sync" - "syscall" -) - -// InmemSignal is used to listen for a given signal, and when received, -// to dump the current metrics from the InmemSink to an io.Writer -type InmemSignal struct { - signal syscall.Signal - inm *InmemSink - w io.Writer - sigCh chan os.Signal - - stop bool - stopCh chan struct{} - stopLock sync.Mutex -} - -// NewInmemSignal creates a new InmemSignal which listens for a given signal, -// and dumps the current metrics out to a writer -func NewInmemSignal(inmem *InmemSink, sig syscall.Signal, w io.Writer) *InmemSignal { - i := &InmemSignal{ - signal: sig, - inm: inmem, - w: w, - sigCh: make(chan os.Signal, 1), - stopCh: make(chan struct{}), - } - signal.Notify(i.sigCh, sig) - go i.run() - return i -} - -// DefaultInmemSignal returns a new InmemSignal that responds to SIGUSR1 -// and writes output to stderr. Windows uses SIGBREAK -func DefaultInmemSignal(inmem *InmemSink) *InmemSignal { - return NewInmemSignal(inmem, DefaultSignal, os.Stderr) -} - -// Stop is used to stop the InmemSignal from listening -func (i *InmemSignal) Stop() { - i.stopLock.Lock() - defer i.stopLock.Unlock() - - if i.stop { - return - } - i.stop = true - close(i.stopCh) - signal.Stop(i.sigCh) -} - -// run is a long running routine that handles signals -func (i *InmemSignal) run() { - for { - select { - case <-i.sigCh: - i.dumpStats() - case <-i.stopCh: - return - } - } -} - -// dumpStats is used to dump the data to output writer -func (i *InmemSignal) dumpStats() { - buf := bytes.NewBuffer(nil) - - data := i.inm.Data() - // Skip the last period which is still being aggregated - for j := 0; j < len(data)-1; j++ { - intv := data[j] - intv.RLock() - for _, val := range intv.Gauges { - name := i.flattenLabels(val.Name, val.Labels) - fmt.Fprintf(buf, "[%v][G] '%s': %0.3f\n", intv.Interval, name, val.Value) - } - for name, vals := range intv.Points { - for _, val := range vals { - fmt.Fprintf(buf, "[%v][P] '%s': %0.3f\n", intv.Interval, name, val) - } - } - for _, agg := range intv.Counters { - name := i.flattenLabels(agg.Name, agg.Labels) - fmt.Fprintf(buf, "[%v][C] '%s': %s\n", intv.Interval, name, agg.AggregateSample) - } - for _, agg := range intv.Samples { - name := i.flattenLabels(agg.Name, agg.Labels) - fmt.Fprintf(buf, "[%v][S] '%s': %s\n", intv.Interval, name, agg.AggregateSample) - } - intv.RUnlock() - } - - // Write out the bytes - i.w.Write(buf.Bytes()) -} - -// Flattens the key for formatting along with its labels, removes spaces -func (i *InmemSignal) flattenLabels(name string, labels []Label) string { - buf := bytes.NewBufferString(name) - replacer := strings.NewReplacer(" ", "_", ":", "_") - - for _, label := range labels { - replacer.WriteString(buf, ".") - replacer.WriteString(buf, label.Value) - } - - return buf.String() -} diff --git a/vendor/github.com/armon/go-metrics/metrics.go 
b/vendor/github.com/armon/go-metrics/metrics.go deleted file mode 100644 index 36642a42..00000000 --- a/vendor/github.com/armon/go-metrics/metrics.go +++ /dev/null @@ -1,299 +0,0 @@ -package metrics - -import ( - "runtime" - "strings" - "time" - - iradix "github.com/hashicorp/go-immutable-radix" -) - -type Label struct { - Name string - Value string -} - -func (m *Metrics) SetGauge(key []string, val float32) { - m.SetGaugeWithLabels(key, val, nil) -} - -func (m *Metrics) SetGaugeWithLabels(key []string, val float32, labels []Label) { - if m.HostName != "" { - if m.EnableHostnameLabel { - labels = append(labels, Label{"host", m.HostName}) - } else if m.EnableHostname { - key = insert(0, m.HostName, key) - } - } - if m.EnableTypePrefix { - key = insert(0, "gauge", key) - } - if m.ServiceName != "" { - if m.EnableServiceLabel { - labels = append(labels, Label{"service", m.ServiceName}) - } else { - key = insert(0, m.ServiceName, key) - } - } - allowed, labelsFiltered := m.allowMetric(key, labels) - if !allowed { - return - } - m.sink.SetGaugeWithLabels(key, val, labelsFiltered) -} - -func (m *Metrics) EmitKey(key []string, val float32) { - if m.EnableTypePrefix { - key = insert(0, "kv", key) - } - if m.ServiceName != "" { - key = insert(0, m.ServiceName, key) - } - allowed, _ := m.allowMetric(key, nil) - if !allowed { - return - } - m.sink.EmitKey(key, val) -} - -func (m *Metrics) IncrCounter(key []string, val float32) { - m.IncrCounterWithLabels(key, val, nil) -} - -func (m *Metrics) IncrCounterWithLabels(key []string, val float32, labels []Label) { - if m.HostName != "" && m.EnableHostnameLabel { - labels = append(labels, Label{"host", m.HostName}) - } - if m.EnableTypePrefix { - key = insert(0, "counter", key) - } - if m.ServiceName != "" { - if m.EnableServiceLabel { - labels = append(labels, Label{"service", m.ServiceName}) - } else { - key = insert(0, m.ServiceName, key) - } - } - allowed, labelsFiltered := m.allowMetric(key, labels) - if !allowed { - return - } - m.sink.IncrCounterWithLabels(key, val, labelsFiltered) -} - -func (m *Metrics) AddSample(key []string, val float32) { - m.AddSampleWithLabels(key, val, nil) -} - -func (m *Metrics) AddSampleWithLabels(key []string, val float32, labels []Label) { - if m.HostName != "" && m.EnableHostnameLabel { - labels = append(labels, Label{"host", m.HostName}) - } - if m.EnableTypePrefix { - key = insert(0, "sample", key) - } - if m.ServiceName != "" { - if m.EnableServiceLabel { - labels = append(labels, Label{"service", m.ServiceName}) - } else { - key = insert(0, m.ServiceName, key) - } - } - allowed, labelsFiltered := m.allowMetric(key, labels) - if !allowed { - return - } - m.sink.AddSampleWithLabels(key, val, labelsFiltered) -} - -func (m *Metrics) MeasureSince(key []string, start time.Time) { - m.MeasureSinceWithLabels(key, start, nil) -} - -func (m *Metrics) MeasureSinceWithLabels(key []string, start time.Time, labels []Label) { - if m.HostName != "" && m.EnableHostnameLabel { - labels = append(labels, Label{"host", m.HostName}) - } - if m.EnableTypePrefix { - key = insert(0, "timer", key) - } - if m.ServiceName != "" { - if m.EnableServiceLabel { - labels = append(labels, Label{"service", m.ServiceName}) - } else { - key = insert(0, m.ServiceName, key) - } - } - allowed, labelsFiltered := m.allowMetric(key, labels) - if !allowed { - return - } - now := time.Now() - elapsed := now.Sub(start) - msec := float32(elapsed.Nanoseconds()) / float32(m.TimerGranularity) - m.sink.AddSampleWithLabels(key, msec, labelsFiltered) -} - -// 
UpdateFilter overwrites the existing filter with the given rules. -func (m *Metrics) UpdateFilter(allow, block []string) { - m.UpdateFilterAndLabels(allow, block, m.AllowedLabels, m.BlockedLabels) -} - -// UpdateFilterAndLabels overwrites the existing filter with the given rules. -func (m *Metrics) UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels []string) { - m.filterLock.Lock() - defer m.filterLock.Unlock() - - m.AllowedPrefixes = allow - m.BlockedPrefixes = block - - if allowedLabels == nil { - // Having a white list means we take only elements from it - m.allowedLabels = nil - } else { - m.allowedLabels = make(map[string]bool) - for _, v := range allowedLabels { - m.allowedLabels[v] = true - } - } - m.blockedLabels = make(map[string]bool) - for _, v := range blockedLabels { - m.blockedLabels[v] = true - } - m.AllowedLabels = allowedLabels - m.BlockedLabels = blockedLabels - - m.filter = iradix.New() - for _, prefix := range m.AllowedPrefixes { - m.filter, _, _ = m.filter.Insert([]byte(prefix), true) - } - for _, prefix := range m.BlockedPrefixes { - m.filter, _, _ = m.filter.Insert([]byte(prefix), false) - } -} - -func (m *Metrics) Shutdown() { - if ss, ok := m.sink.(ShutdownSink); ok { - ss.Shutdown() - } -} - -// labelIsAllowed return true if a should be included in metric -// the caller should lock m.filterLock while calling this method -func (m *Metrics) labelIsAllowed(label *Label) bool { - labelName := (*label).Name - if m.blockedLabels != nil { - _, ok := m.blockedLabels[labelName] - if ok { - // If present, let's remove this label - return false - } - } - if m.allowedLabels != nil { - _, ok := m.allowedLabels[labelName] - return ok - } - // Allow by default - return true -} - -// filterLabels return only allowed labels -// the caller should lock m.filterLock while calling this method -func (m *Metrics) filterLabels(labels []Label) []Label { - if labels == nil { - return nil - } - toReturn := []Label{} - for _, label := range labels { - if m.labelIsAllowed(&label) { - toReturn = append(toReturn, label) - } - } - return toReturn -} - -// Returns whether the metric should be allowed based on configured prefix filters -// Also return the applicable labels -func (m *Metrics) allowMetric(key []string, labels []Label) (bool, []Label) { - m.filterLock.RLock() - defer m.filterLock.RUnlock() - - if m.filter == nil || m.filter.Len() == 0 { - return m.Config.FilterDefault, m.filterLabels(labels) - } - - _, allowed, ok := m.filter.Root().LongestPrefix([]byte(strings.Join(key, "."))) - if !ok { - return m.Config.FilterDefault, m.filterLabels(labels) - } - - return allowed.(bool), m.filterLabels(labels) -} - -// Periodically collects runtime stats to publish -func (m *Metrics) collectStats() { - for { - time.Sleep(m.ProfileInterval) - m.EmitRuntimeStats() - } -} - -// Emits various runtime statsitics -func (m *Metrics) EmitRuntimeStats() { - // Export number of Goroutines - numRoutines := runtime.NumGoroutine() - m.SetGauge([]string{"runtime", "num_goroutines"}, float32(numRoutines)) - - // Export memory stats - var stats runtime.MemStats - runtime.ReadMemStats(&stats) - m.SetGauge([]string{"runtime", "alloc_bytes"}, float32(stats.Alloc)) - m.SetGauge([]string{"runtime", "sys_bytes"}, float32(stats.Sys)) - m.SetGauge([]string{"runtime", "malloc_count"}, float32(stats.Mallocs)) - m.SetGauge([]string{"runtime", "free_count"}, float32(stats.Frees)) - m.SetGauge([]string{"runtime", "heap_objects"}, float32(stats.HeapObjects)) - m.SetGauge([]string{"runtime", "total_gc_pause_ns"}, 
float32(stats.PauseTotalNs)) - m.SetGauge([]string{"runtime", "total_gc_runs"}, float32(stats.NumGC)) - - // Export info about the last few GC runs - num := stats.NumGC - - // Handle wrap around - if num < m.lastNumGC { - m.lastNumGC = 0 - } - - // Ensure we don't scan more than 256 - if num-m.lastNumGC >= 256 { - m.lastNumGC = num - 255 - } - - for i := m.lastNumGC; i < num; i++ { - pause := stats.PauseNs[i%256] - m.AddSample([]string{"runtime", "gc_pause_ns"}, float32(pause)) - } - m.lastNumGC = num -} - -// Creates a new slice with the provided string value as the first element -// and the provided slice values as the remaining values. -// Ordering of the values in the provided input slice is kept in tact in the output slice. -func insert(i int, v string, s []string) []string { - // Allocate new slice to avoid modifying the input slice - newS := make([]string, len(s)+1) - - // Copy s[0, i-1] into newS - for j := 0; j < i; j++ { - newS[j] = s[j] - } - - // Insert provided element at index i - newS[i] = v - - // Copy s[i, len(s)-1] into newS starting at newS[i+1] - for j := i; j < len(s); j++ { - newS[j+1] = s[j] - } - - return newS -} diff --git a/vendor/github.com/armon/go-metrics/sink.go b/vendor/github.com/armon/go-metrics/sink.go deleted file mode 100644 index 6f4108ff..00000000 --- a/vendor/github.com/armon/go-metrics/sink.go +++ /dev/null @@ -1,132 +0,0 @@ -package metrics - -import ( - "fmt" - "net/url" -) - -// The MetricSink interface is used to transmit metrics information -// to an external system -type MetricSink interface { - // A Gauge should retain the last value it is set to - SetGauge(key []string, val float32) - SetGaugeWithLabels(key []string, val float32, labels []Label) - - // Should emit a Key/Value pair for each call - EmitKey(key []string, val float32) - - // Counters should accumulate values - IncrCounter(key []string, val float32) - IncrCounterWithLabels(key []string, val float32, labels []Label) - - // Samples are for timing information, where quantiles are used - AddSample(key []string, val float32) - AddSampleWithLabels(key []string, val float32, labels []Label) -} - -type ShutdownSink interface { - MetricSink - - // Shutdown the metric sink, flush metrics to storage, and cleanup resources. - // Called immediately prior to application exit. Implementations must block - // until metrics are flushed to storage. 
- Shutdown() -} - -// BlackholeSink is used to just blackhole messages -type BlackholeSink struct{} - -func (*BlackholeSink) SetGauge(key []string, val float32) {} -func (*BlackholeSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {} -func (*BlackholeSink) EmitKey(key []string, val float32) {} -func (*BlackholeSink) IncrCounter(key []string, val float32) {} -func (*BlackholeSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {} -func (*BlackholeSink) AddSample(key []string, val float32) {} -func (*BlackholeSink) AddSampleWithLabels(key []string, val float32, labels []Label) {} - -// FanoutSink is used to sink to fanout values to multiple sinks -type FanoutSink []MetricSink - -func (fh FanoutSink) SetGauge(key []string, val float32) { - fh.SetGaugeWithLabels(key, val, nil) -} - -func (fh FanoutSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { - for _, s := range fh { - s.SetGaugeWithLabels(key, val, labels) - } -} - -func (fh FanoutSink) EmitKey(key []string, val float32) { - for _, s := range fh { - s.EmitKey(key, val) - } -} - -func (fh FanoutSink) IncrCounter(key []string, val float32) { - fh.IncrCounterWithLabels(key, val, nil) -} - -func (fh FanoutSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { - for _, s := range fh { - s.IncrCounterWithLabels(key, val, labels) - } -} - -func (fh FanoutSink) AddSample(key []string, val float32) { - fh.AddSampleWithLabels(key, val, nil) -} - -func (fh FanoutSink) AddSampleWithLabels(key []string, val float32, labels []Label) { - for _, s := range fh { - s.AddSampleWithLabels(key, val, labels) - } -} - -func (fh FanoutSink) Shutdown() { - for _, s := range fh { - if ss, ok := s.(ShutdownSink); ok { - ss.Shutdown() - } - } -} - -// sinkURLFactoryFunc is an generic interface around the *SinkFromURL() function provided -// by each sink type -type sinkURLFactoryFunc func(*url.URL) (MetricSink, error) - -// sinkRegistry supports the generic NewMetricSink function by mapping URL -// schemes to metric sink factory functions -var sinkRegistry = map[string]sinkURLFactoryFunc{ - "statsd": NewStatsdSinkFromURL, - "statsite": NewStatsiteSinkFromURL, - "inmem": NewInmemSinkFromURL, -} - -// NewMetricSinkFromURL allows a generic URL input to configure any of the -// supported sinks. The scheme of the URL identifies the type of the sink, the -// and query parameters are used to set options. -// -// "statsd://" - Initializes a StatsdSink. The host and port are passed through -// as the "addr" of the sink -// -// "statsite://" - Initializes a StatsiteSink. The host and port become the -// "addr" of the sink -// -// "inmem://" - Initializes an InmemSink. The host and port are ignored. The -// "interval" and "duration" query parameters must be specified with valid -// durations, see NewInmemSink for details. 
-func NewMetricSinkFromURL(urlStr string) (MetricSink, error) { - u, err := url.Parse(urlStr) - if err != nil { - return nil, err - } - - sinkURLFactoryFunc := sinkRegistry[u.Scheme] - if sinkURLFactoryFunc == nil { - return nil, fmt.Errorf( - "cannot create metric sink, unrecognized sink name: %q", u.Scheme) - } - - return sinkURLFactoryFunc(u) -} diff --git a/vendor/github.com/armon/go-metrics/start.go b/vendor/github.com/armon/go-metrics/start.go deleted file mode 100644 index 38976f8d..00000000 --- a/vendor/github.com/armon/go-metrics/start.go +++ /dev/null @@ -1,158 +0,0 @@ -package metrics - -import ( - "os" - "sync" - "sync/atomic" - "time" - - iradix "github.com/hashicorp/go-immutable-radix" -) - -// Config is used to configure metrics settings -type Config struct { - ServiceName string // Prefixed with keys to separate services - HostName string // Hostname to use. If not provided and EnableHostname, it will be os.Hostname - EnableHostname bool // Enable prefixing gauge values with hostname - EnableHostnameLabel bool // Enable adding hostname to labels - EnableServiceLabel bool // Enable adding service to labels - EnableRuntimeMetrics bool // Enables profiling of runtime metrics (GC, Goroutines, Memory) - EnableTypePrefix bool // Prefixes key with a type ("counter", "gauge", "timer") - TimerGranularity time.Duration // Granularity of timers. - ProfileInterval time.Duration // Interval to profile runtime metrics - - AllowedPrefixes []string // A list of metric prefixes to allow, with '.' as the separator - BlockedPrefixes []string // A list of metric prefixes to block, with '.' as the separator - AllowedLabels []string // A list of metric labels to allow, with '.' as the separator - BlockedLabels []string // A list of metric labels to block, with '.' as the separator - FilterDefault bool // Whether to allow metrics by default -} - -// Metrics represents an instance of a metrics sink that can -// be used to emit -type Metrics struct { - Config - lastNumGC uint32 - sink MetricSink - filter *iradix.Tree - allowedLabels map[string]bool - blockedLabels map[string]bool - filterLock sync.RWMutex // Lock filters and allowedLabels/blockedLabels access -} - -// Shared global metrics instance -var globalMetrics atomic.Value // *Metrics - -func init() { - // Initialize to a blackhole sink to avoid errors - globalMetrics.Store(&Metrics{sink: &BlackholeSink{}}) -} - -// Default returns the shared global metrics instance. 
-func Default() *Metrics { - return globalMetrics.Load().(*Metrics) -} - -// DefaultConfig provides a sane default configuration -func DefaultConfig(serviceName string) *Config { - c := &Config{ - ServiceName: serviceName, // Use client provided service - HostName: "", - EnableHostname: true, // Enable hostname prefix - EnableRuntimeMetrics: true, // Enable runtime profiling - EnableTypePrefix: false, // Disable type prefix - TimerGranularity: time.Millisecond, // Timers are in milliseconds - ProfileInterval: time.Second, // Poll runtime every second - FilterDefault: true, // Don't filter metrics by default - } - - // Try to get the hostname - name, _ := os.Hostname() - c.HostName = name - return c -} - -// New is used to create a new instance of Metrics -func New(conf *Config, sink MetricSink) (*Metrics, error) { - met := &Metrics{} - met.Config = *conf - met.sink = sink - met.UpdateFilterAndLabels(conf.AllowedPrefixes, conf.BlockedPrefixes, conf.AllowedLabels, conf.BlockedLabels) - - // Start the runtime collector - if conf.EnableRuntimeMetrics { - go met.collectStats() - } - return met, nil -} - -// NewGlobal is the same as New, but it assigns the metrics object to be -// used globally as well as returning it. -func NewGlobal(conf *Config, sink MetricSink) (*Metrics, error) { - metrics, err := New(conf, sink) - if err == nil { - globalMetrics.Store(metrics) - } - return metrics, err -} - -// Proxy all the methods to the globalMetrics instance -func SetGauge(key []string, val float32) { - globalMetrics.Load().(*Metrics).SetGauge(key, val) -} - -func SetGaugeWithLabels(key []string, val float32, labels []Label) { - globalMetrics.Load().(*Metrics).SetGaugeWithLabels(key, val, labels) -} - -func EmitKey(key []string, val float32) { - globalMetrics.Load().(*Metrics).EmitKey(key, val) -} - -func IncrCounter(key []string, val float32) { - globalMetrics.Load().(*Metrics).IncrCounter(key, val) -} - -func IncrCounterWithLabels(key []string, val float32, labels []Label) { - globalMetrics.Load().(*Metrics).IncrCounterWithLabels(key, val, labels) -} - -func AddSample(key []string, val float32) { - globalMetrics.Load().(*Metrics).AddSample(key, val) -} - -func AddSampleWithLabels(key []string, val float32, labels []Label) { - globalMetrics.Load().(*Metrics).AddSampleWithLabels(key, val, labels) -} - -func MeasureSince(key []string, start time.Time) { - globalMetrics.Load().(*Metrics).MeasureSince(key, start) -} - -func MeasureSinceWithLabels(key []string, start time.Time, labels []Label) { - globalMetrics.Load().(*Metrics).MeasureSinceWithLabels(key, start, labels) -} - -func UpdateFilter(allow, block []string) { - globalMetrics.Load().(*Metrics).UpdateFilter(allow, block) -} - -// UpdateFilterAndLabels set allow/block prefixes of metrics while allowedLabels -// and blockedLabels - when not nil - allow filtering of labels in order to -// block/allow globally labels (especially useful when having large number of -// values for a given label). See README.md for more information about usage. -func UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels []string) { - globalMetrics.Load().(*Metrics).UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels) -} - -// Shutdown disables metric collection, then blocks while attempting to flush metrics to storage. -// WARNING: Not all MetricSink backends support this functionality, and calling this will cause them to leak resources. -// This is intended for use immediately prior to application exit. 
-func Shutdown() { - m := globalMetrics.Load().(*Metrics) - // Swap whatever MetricSink is currently active with a BlackholeSink. Callers must not have a - // reason to expect that calls to the library will successfully collect metrics after Shutdown - // has been called. - globalMetrics.Store(&Metrics{sink: &BlackholeSink{}}) - m.Shutdown() -} diff --git a/vendor/github.com/armon/go-metrics/statsd.go b/vendor/github.com/armon/go-metrics/statsd.go deleted file mode 100644 index 1bfffce4..00000000 --- a/vendor/github.com/armon/go-metrics/statsd.go +++ /dev/null @@ -1,184 +0,0 @@ -package metrics - -import ( - "bytes" - "fmt" - "log" - "net" - "net/url" - "strings" - "time" -) - -const ( - // statsdMaxLen is the maximum size of a packet - // to send to statsd - statsdMaxLen = 1400 -) - -// StatsdSink provides a MetricSink that can be used -// with a statsite or statsd metrics server. It uses -// only UDP packets, while StatsiteSink uses TCP. -type StatsdSink struct { - addr string - metricQueue chan string -} - -// NewStatsdSinkFromURL creates an StatsdSink from a URL. It is used -// (and tested) from NewMetricSinkFromURL. -func NewStatsdSinkFromURL(u *url.URL) (MetricSink, error) { - return NewStatsdSink(u.Host) -} - -// NewStatsdSink is used to create a new StatsdSink -func NewStatsdSink(addr string) (*StatsdSink, error) { - s := &StatsdSink{ - addr: addr, - metricQueue: make(chan string, 4096), - } - go s.flushMetrics() - return s, nil -} - -// Close is used to stop flushing to statsd -func (s *StatsdSink) Shutdown() { - close(s.metricQueue) -} - -func (s *StatsdSink) SetGauge(key []string, val float32) { - flatKey := s.flattenKey(key) - s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) -} - -func (s *StatsdSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { - flatKey := s.flattenKeyLabels(key, labels) - s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) -} - -func (s *StatsdSink) EmitKey(key []string, val float32) { - flatKey := s.flattenKey(key) - s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val)) -} - -func (s *StatsdSink) IncrCounter(key []string, val float32) { - flatKey := s.flattenKey(key) - s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) -} - -func (s *StatsdSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { - flatKey := s.flattenKeyLabels(key, labels) - s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) -} - -func (s *StatsdSink) AddSample(key []string, val float32) { - flatKey := s.flattenKey(key) - s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) -} - -func (s *StatsdSink) AddSampleWithLabels(key []string, val float32, labels []Label) { - flatKey := s.flattenKeyLabels(key, labels) - s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) -} - -// Flattens the key for formatting, removes spaces -func (s *StatsdSink) flattenKey(parts []string) string { - joined := strings.Join(parts, ".") - return strings.Map(func(r rune) rune { - switch r { - case ':': - fallthrough - case ' ': - return '_' - default: - return r - } - }, joined) -} - -// Flattens the key along with labels for formatting, removes spaces -func (s *StatsdSink) flattenKeyLabels(parts []string, labels []Label) string { - for _, label := range labels { - parts = append(parts, label.Value) - } - return s.flattenKey(parts) -} - -// Does a non-blocking push to the metrics queue -func (s *StatsdSink) pushMetric(m string) { - select { - case s.metricQueue <- m: - default: - } -} - -// Flushes metrics -func (s *StatsdSink) flushMetrics() { - var sock net.Conn - var 
err error - var wait <-chan time.Time - ticker := time.NewTicker(flushInterval) - defer ticker.Stop() - -CONNECT: - // Create a buffer - buf := bytes.NewBuffer(nil) - - // Attempt to connect - sock, err = net.Dial("udp", s.addr) - if err != nil { - log.Printf("[ERR] Error connecting to statsd! Err: %s", err) - goto WAIT - } - - for { - select { - case metric, ok := <-s.metricQueue: - // Get a metric from the queue - if !ok { - goto QUIT - } - - // Check if this would overflow the packet size - if len(metric)+buf.Len() > statsdMaxLen { - _, err := sock.Write(buf.Bytes()) - buf.Reset() - if err != nil { - log.Printf("[ERR] Error writing to statsd! Err: %s", err) - goto WAIT - } - } - - // Append to the buffer - buf.WriteString(metric) - - case <-ticker.C: - if buf.Len() == 0 { - continue - } - - _, err := sock.Write(buf.Bytes()) - buf.Reset() - if err != nil { - log.Printf("[ERR] Error flushing to statsd! Err: %s", err) - goto WAIT - } - } - } - -WAIT: - // Wait for a while - wait = time.After(time.Duration(5) * time.Second) - for { - select { - // Dequeue the messages to avoid backlog - case _, ok := <-s.metricQueue: - if !ok { - goto QUIT - } - case <-wait: - goto CONNECT - } - } -QUIT: - s.metricQueue = nil -} diff --git a/vendor/github.com/armon/go-metrics/statsite.go b/vendor/github.com/armon/go-metrics/statsite.go deleted file mode 100644 index 6c0d284d..00000000 --- a/vendor/github.com/armon/go-metrics/statsite.go +++ /dev/null @@ -1,172 +0,0 @@ -package metrics - -import ( - "bufio" - "fmt" - "log" - "net" - "net/url" - "strings" - "time" -) - -const ( - // We force flush the statsite metrics after this period of - // inactivity. Prevents stats from getting stuck in a buffer - // forever. - flushInterval = 100 * time.Millisecond -) - -// NewStatsiteSinkFromURL creates an StatsiteSink from a URL. It is used -// (and tested) from NewMetricSinkFromURL. 
-func NewStatsiteSinkFromURL(u *url.URL) (MetricSink, error) { - return NewStatsiteSink(u.Host) -} - -// StatsiteSink provides a MetricSink that can be used with a -// statsite metrics server -type StatsiteSink struct { - addr string - metricQueue chan string -} - -// NewStatsiteSink is used to create a new StatsiteSink -func NewStatsiteSink(addr string) (*StatsiteSink, error) { - s := &StatsiteSink{ - addr: addr, - metricQueue: make(chan string, 4096), - } - go s.flushMetrics() - return s, nil -} - -// Close is used to stop flushing to statsite -func (s *StatsiteSink) Shutdown() { - close(s.metricQueue) -} - -func (s *StatsiteSink) SetGauge(key []string, val float32) { - flatKey := s.flattenKey(key) - s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) -} - -func (s *StatsiteSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { - flatKey := s.flattenKeyLabels(key, labels) - s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) -} - -func (s *StatsiteSink) EmitKey(key []string, val float32) { - flatKey := s.flattenKey(key) - s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val)) -} - -func (s *StatsiteSink) IncrCounter(key []string, val float32) { - flatKey := s.flattenKey(key) - s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) -} - -func (s *StatsiteSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { - flatKey := s.flattenKeyLabels(key, labels) - s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) -} - -func (s *StatsiteSink) AddSample(key []string, val float32) { - flatKey := s.flattenKey(key) - s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) -} - -func (s *StatsiteSink) AddSampleWithLabels(key []string, val float32, labels []Label) { - flatKey := s.flattenKeyLabels(key, labels) - s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) -} - -// Flattens the key for formatting, removes spaces -func (s *StatsiteSink) flattenKey(parts []string) string { - joined := strings.Join(parts, ".") - return strings.Map(func(r rune) rune { - switch r { - case ':': - fallthrough - case ' ': - return '_' - default: - return r - } - }, joined) -} - -// Flattens the key along with labels for formatting, removes spaces -func (s *StatsiteSink) flattenKeyLabels(parts []string, labels []Label) string { - for _, label := range labels { - parts = append(parts, label.Value) - } - return s.flattenKey(parts) -} - -// Does a non-blocking push to the metrics queue -func (s *StatsiteSink) pushMetric(m string) { - select { - case s.metricQueue <- m: - default: - } -} - -// Flushes metrics -func (s *StatsiteSink) flushMetrics() { - var sock net.Conn - var err error - var wait <-chan time.Time - var buffered *bufio.Writer - ticker := time.NewTicker(flushInterval) - defer ticker.Stop() - -CONNECT: - // Attempt to connect - sock, err = net.Dial("tcp", s.addr) - if err != nil { - log.Printf("[ERR] Error connecting to statsite! Err: %s", err) - goto WAIT - } - - // Create a buffered writer - buffered = bufio.NewWriter(sock) - - for { - select { - case metric, ok := <-s.metricQueue: - // Get a metric from the queue - if !ok { - goto QUIT - } - - // Try to send to statsite - _, err := buffered.Write([]byte(metric)) - if err != nil { - log.Printf("[ERR] Error writing to statsite! Err: %s", err) - goto WAIT - } - case <-ticker.C: - if err := buffered.Flush(); err != nil { - log.Printf("[ERR] Error flushing to statsite! 
Err: %s", err) - goto WAIT - } - } - } - -WAIT: - // Wait for a while - wait = time.After(time.Duration(5) * time.Second) - for { - select { - // Dequeue the messages to avoid backlog - case _, ok := <-s.metricQueue: - if !ok { - goto QUIT - } - case <-wait: - goto CONNECT - } - } -QUIT: - s.metricQueue = nil -} diff --git a/vendor/github.com/armon/go-radix/.gitignore b/vendor/github.com/armon/go-radix/.gitignore deleted file mode 100644 index 00268614..00000000 --- a/vendor/github.com/armon/go-radix/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe diff --git a/vendor/github.com/armon/go-radix/.travis.yml b/vendor/github.com/armon/go-radix/.travis.yml deleted file mode 100644 index 1a0bbea6..00000000 --- a/vendor/github.com/armon/go-radix/.travis.yml +++ /dev/null @@ -1,3 +0,0 @@ -language: go -go: - - tip diff --git a/vendor/github.com/armon/go-radix/LICENSE b/vendor/github.com/armon/go-radix/LICENSE deleted file mode 100644 index a5df10e6..00000000 --- a/vendor/github.com/armon/go-radix/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Armon Dadgar - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/armon/go-radix/README.md b/vendor/github.com/armon/go-radix/README.md deleted file mode 100644 index 26f42a28..00000000 --- a/vendor/github.com/armon/go-radix/README.md +++ /dev/null @@ -1,38 +0,0 @@ -go-radix [![Build Status](https://travis-ci.org/armon/go-radix.png)](https://travis-ci.org/armon/go-radix) -========= - -Provides the `radix` package that implements a [radix tree](http://en.wikipedia.org/wiki/Radix_tree). -The package only provides a single `Tree` implementation, optimized for sparse nodes. - -As a radix tree, it provides the following: - * O(k) operations. In many cases, this can be faster than a hash table since - the hash function is an O(k) operation, and hash tables have very poor cache locality. - * Minimum / Maximum value lookups - * Ordered iteration - -For an immutable variant, see [go-immutable-radix](https://github.com/hashicorp/go-immutable-radix). - -Documentation -============= - -The full documentation is available on [Godoc](http://godoc.org/github.com/armon/go-radix). 
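As a quick, illustrative sketch of the API being removed below — not taken from the vendored file, and with made-up keys and values — the prefix-oriented operations defined in radix.go (New, Insert, WalkPrefix, Minimum, Maximum, DeletePrefix, Len) compose like this:

```go
package main

import (
	"fmt"

	"github.com/armon/go-radix"
)

func main() {
	// Build a small tree keyed by path-like strings (example data only).
	r := radix.New()
	r.Insert("alpha", 1)
	r.Insert("alpha/one", 2)
	r.Insert("alpha/two", 3)
	r.Insert("beta", 4)

	// Ordered, prefix-scoped iteration: visits "alpha", "alpha/one", "alpha/two".
	r.WalkPrefix("alpha", func(k string, v interface{}) bool {
		fmt.Println(k, v)
		return false // returning false keeps the walk going
	})

	// Minimum and Maximum return the lexicographically first and last keys.
	if k, _, ok := r.Minimum(); ok {
		fmt.Println("min:", k)
	}
	if k, _, ok := r.Maximum(); ok {
		fmt.Println("max:", k)
	}

	// DeletePrefix removes a whole subtree and reports how many keys were removed.
	fmt.Println("deleted:", r.DeletePrefix("alpha/"), "remaining:", r.Len())
}
```

Per the WalkFn contract in radix.go, the callback's boolean return indicates whether iteration should terminate, so returning false walks the entire prefix.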
- -Example -======= - -Below is a simple example of usage - -```go -// Create a tree -r := radix.New() -r.Insert("foo", 1) -r.Insert("bar", 2) -r.Insert("foobar", 2) - -// Find the longest prefix match -m, _, _ := r.LongestPrefix("foozip") -if m != "foo" { - panic("should be foo") -} -``` - diff --git a/vendor/github.com/armon/go-radix/radix.go b/vendor/github.com/armon/go-radix/radix.go deleted file mode 100644 index e2bb22eb..00000000 --- a/vendor/github.com/armon/go-radix/radix.go +++ /dev/null @@ -1,540 +0,0 @@ -package radix - -import ( - "sort" - "strings" -) - -// WalkFn is used when walking the tree. Takes a -// key and value, returning if iteration should -// be terminated. -type WalkFn func(s string, v interface{}) bool - -// leafNode is used to represent a value -type leafNode struct { - key string - val interface{} -} - -// edge is used to represent an edge node -type edge struct { - label byte - node *node -} - -type node struct { - // leaf is used to store possible leaf - leaf *leafNode - - // prefix is the common prefix we ignore - prefix string - - // Edges should be stored in-order for iteration. - // We avoid a fully materialized slice to save memory, - // since in most cases we expect to be sparse - edges edges -} - -func (n *node) isLeaf() bool { - return n.leaf != nil -} - -func (n *node) addEdge(e edge) { - n.edges = append(n.edges, e) - n.edges.Sort() -} - -func (n *node) updateEdge(label byte, node *node) { - num := len(n.edges) - idx := sort.Search(num, func(i int) bool { - return n.edges[i].label >= label - }) - if idx < num && n.edges[idx].label == label { - n.edges[idx].node = node - return - } - panic("replacing missing edge") -} - -func (n *node) getEdge(label byte) *node { - num := len(n.edges) - idx := sort.Search(num, func(i int) bool { - return n.edges[i].label >= label - }) - if idx < num && n.edges[idx].label == label { - return n.edges[idx].node - } - return nil -} - -func (n *node) delEdge(label byte) { - num := len(n.edges) - idx := sort.Search(num, func(i int) bool { - return n.edges[i].label >= label - }) - if idx < num && n.edges[idx].label == label { - copy(n.edges[idx:], n.edges[idx+1:]) - n.edges[len(n.edges)-1] = edge{} - n.edges = n.edges[:len(n.edges)-1] - } -} - -type edges []edge - -func (e edges) Len() int { - return len(e) -} - -func (e edges) Less(i, j int) bool { - return e[i].label < e[j].label -} - -func (e edges) Swap(i, j int) { - e[i], e[j] = e[j], e[i] -} - -func (e edges) Sort() { - sort.Sort(e) -} - -// Tree implements a radix tree. This can be treated as a -// Dictionary abstract data type. The main advantage over -// a standard hash map is prefix-based lookups and -// ordered iteration, -type Tree struct { - root *node - size int -} - -// New returns an empty Tree -func New() *Tree { - return NewFromMap(nil) -} - -// NewFromMap returns a new tree containing the keys -// from an existing map -func NewFromMap(m map[string]interface{}) *Tree { - t := &Tree{root: &node{}} - for k, v := range m { - t.Insert(k, v) - } - return t -} - -// Len is used to return the number of elements in the tree -func (t *Tree) Len() int { - return t.size -} - -// longestPrefix finds the length of the shared prefix -// of two strings -func longestPrefix(k1, k2 string) int { - max := len(k1) - if l := len(k2); l < max { - max = l - } - var i int - for i = 0; i < max; i++ { - if k1[i] != k2[i] { - break - } - } - return i -} - -// Insert is used to add a newentry or update -// an existing entry. Returns if updated. 
-func (t *Tree) Insert(s string, v interface{}) (interface{}, bool) { - var parent *node - n := t.root - search := s - for { - // Handle key exhaution - if len(search) == 0 { - if n.isLeaf() { - old := n.leaf.val - n.leaf.val = v - return old, true - } - - n.leaf = &leafNode{ - key: s, - val: v, - } - t.size++ - return nil, false - } - - // Look for the edge - parent = n - n = n.getEdge(search[0]) - - // No edge, create one - if n == nil { - e := edge{ - label: search[0], - node: &node{ - leaf: &leafNode{ - key: s, - val: v, - }, - prefix: search, - }, - } - parent.addEdge(e) - t.size++ - return nil, false - } - - // Determine longest prefix of the search key on match - commonPrefix := longestPrefix(search, n.prefix) - if commonPrefix == len(n.prefix) { - search = search[commonPrefix:] - continue - } - - // Split the node - t.size++ - child := &node{ - prefix: search[:commonPrefix], - } - parent.updateEdge(search[0], child) - - // Restore the existing node - child.addEdge(edge{ - label: n.prefix[commonPrefix], - node: n, - }) - n.prefix = n.prefix[commonPrefix:] - - // Create a new leaf node - leaf := &leafNode{ - key: s, - val: v, - } - - // If the new key is a subset, add to to this node - search = search[commonPrefix:] - if len(search) == 0 { - child.leaf = leaf - return nil, false - } - - // Create a new edge for the node - child.addEdge(edge{ - label: search[0], - node: &node{ - leaf: leaf, - prefix: search, - }, - }) - return nil, false - } -} - -// Delete is used to delete a key, returning the previous -// value and if it was deleted -func (t *Tree) Delete(s string) (interface{}, bool) { - var parent *node - var label byte - n := t.root - search := s - for { - // Check for key exhaution - if len(search) == 0 { - if !n.isLeaf() { - break - } - goto DELETE - } - - // Look for an edge - parent = n - label = search[0] - n = n.getEdge(label) - if n == nil { - break - } - - // Consume the search prefix - if strings.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - } else { - break - } - } - return nil, false - -DELETE: - // Delete the leaf - leaf := n.leaf - n.leaf = nil - t.size-- - - // Check if we should delete this node from the parent - if parent != nil && len(n.edges) == 0 { - parent.delEdge(label) - } - - // Check if we should merge this node - if n != t.root && len(n.edges) == 1 { - n.mergeChild() - } - - // Check if we should merge the parent's other child - if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() { - parent.mergeChild() - } - - return leaf.val, true -} - -// DeletePrefix is used to delete the subtree under a prefix -// Returns how many nodes were deleted -// Use this to delete large subtrees efficiently -func (t *Tree) DeletePrefix(s string) int { - return t.deletePrefix(nil, t.root, s) -} - -// delete does a recursive deletion -func (t *Tree) deletePrefix(parent, n *node, prefix string) int { - // Check for key exhaustion - if len(prefix) == 0 { - // Remove the leaf node - subTreeSize := 0 - //recursively walk from all edges of the node to be deleted - recursiveWalk(n, func(s string, v interface{}) bool { - subTreeSize++ - return false - }) - if n.isLeaf() { - n.leaf = nil - } - n.edges = nil // deletes the entire subtree - - // Check if we should merge the parent's other child - if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() { - parent.mergeChild() - } - t.size -= subTreeSize - return subTreeSize - } - - // Look for an edge - label := prefix[0] - child := n.getEdge(label) - if child 
== nil || (!strings.HasPrefix(child.prefix, prefix) && !strings.HasPrefix(prefix, child.prefix)) { - return 0 - } - - // Consume the search prefix - if len(child.prefix) > len(prefix) { - prefix = prefix[len(prefix):] - } else { - prefix = prefix[len(child.prefix):] - } - return t.deletePrefix(n, child, prefix) -} - -func (n *node) mergeChild() { - e := n.edges[0] - child := e.node - n.prefix = n.prefix + child.prefix - n.leaf = child.leaf - n.edges = child.edges -} - -// Get is used to lookup a specific key, returning -// the value and if it was found -func (t *Tree) Get(s string) (interface{}, bool) { - n := t.root - search := s - for { - // Check for key exhaution - if len(search) == 0 { - if n.isLeaf() { - return n.leaf.val, true - } - break - } - - // Look for an edge - n = n.getEdge(search[0]) - if n == nil { - break - } - - // Consume the search prefix - if strings.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - } else { - break - } - } - return nil, false -} - -// LongestPrefix is like Get, but instead of an -// exact match, it will return the longest prefix match. -func (t *Tree) LongestPrefix(s string) (string, interface{}, bool) { - var last *leafNode - n := t.root - search := s - for { - // Look for a leaf node - if n.isLeaf() { - last = n.leaf - } - - // Check for key exhaution - if len(search) == 0 { - break - } - - // Look for an edge - n = n.getEdge(search[0]) - if n == nil { - break - } - - // Consume the search prefix - if strings.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - } else { - break - } - } - if last != nil { - return last.key, last.val, true - } - return "", nil, false -} - -// Minimum is used to return the minimum value in the tree -func (t *Tree) Minimum() (string, interface{}, bool) { - n := t.root - for { - if n.isLeaf() { - return n.leaf.key, n.leaf.val, true - } - if len(n.edges) > 0 { - n = n.edges[0].node - } else { - break - } - } - return "", nil, false -} - -// Maximum is used to return the maximum value in the tree -func (t *Tree) Maximum() (string, interface{}, bool) { - n := t.root - for { - if num := len(n.edges); num > 0 { - n = n.edges[num-1].node - continue - } - if n.isLeaf() { - return n.leaf.key, n.leaf.val, true - } - break - } - return "", nil, false -} - -// Walk is used to walk the tree -func (t *Tree) Walk(fn WalkFn) { - recursiveWalk(t.root, fn) -} - -// WalkPrefix is used to walk the tree under a prefix -func (t *Tree) WalkPrefix(prefix string, fn WalkFn) { - n := t.root - search := prefix - for { - // Check for key exhaution - if len(search) == 0 { - recursiveWalk(n, fn) - return - } - - // Look for an edge - n = n.getEdge(search[0]) - if n == nil { - break - } - - // Consume the search prefix - if strings.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - - } else if strings.HasPrefix(n.prefix, search) { - // Child may be under our search prefix - recursiveWalk(n, fn) - return - } else { - break - } - } - -} - -// WalkPath is used to walk the tree, but only visiting nodes -// from the root down to a given leaf. Where WalkPrefix walks -// all the entries *under* the given prefix, this walks the -// entries *above* the given prefix. 
-func (t *Tree) WalkPath(path string, fn WalkFn) { - n := t.root - search := path - for { - // Visit the leaf values if any - if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { - return - } - - // Check for key exhaution - if len(search) == 0 { - return - } - - // Look for an edge - n = n.getEdge(search[0]) - if n == nil { - return - } - - // Consume the search prefix - if strings.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - } else { - break - } - } -} - -// recursiveWalk is used to do a pre-order walk of a node -// recursively. Returns true if the walk should be aborted -func recursiveWalk(n *node, fn WalkFn) bool { - // Visit the leaf values if any - if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { - return true - } - - // Recurse on the children - for _, e := range n.edges { - if recursiveWalk(e.node, fn) { - return true - } - } - return false -} - -// ToMap is used to walk the tree and convert it into a map -func (t *Tree) ToMap() map[string]interface{} { - out := make(map[string]interface{}, t.size) - t.Walk(func(k string, v interface{}) bool { - out[k] = v - return false - }) - return out -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/CHANGELOG.md index 197c118e..c3512dd7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/CHANGELOG.md @@ -1,3 +1,3477 @@ +# Release (2023-06-13) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.27.0](service/cloudtrail/CHANGELOG.md#v1270-2023-06-13) + * **Feature**: This feature allows users to view dashboards for CloudTrail Lake event data stores. +* `github.com/aws/aws-sdk-go-v2/service/codegurusecurity`: [v1.0.0](service/codegurusecurity/CHANGELOG.md#v100-2023-06-13) + * **Release**: New AWS service client module + * **Feature**: Initial release of Amazon CodeGuru Security APIs +* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.14.0](service/drs/CHANGELOG.md#v1140-2023-06-13) + * **Feature**: Added APIs to support network replication and recovery using AWS Elastic Disaster Recovery. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.100.0](service/ec2/CHANGELOG.md#v11000-2023-06-13) + * **Feature**: This release introduces a new feature, EC2 Instance Connect Endpoint, that enables you to connect to a resource over TCP, without requiring the resource to have a public IPv4 address. +* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.23.5](service/imagebuilder/CHANGELOG.md#v1235-2023-06-13) + * **Documentation**: Change the Image Builder ImagePipeline dateNextRun field to more accurately describe the data. +* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.27.0](service/lightsail/CHANGELOG.md#v1270-2023-06-13) + * **Feature**: This release adds pagination for the Get Certificates API operation. +* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.34.0](service/s3/CHANGELOG.md#v1340-2023-06-13) + * **Feature**: Integrate double encryption feature to SDKs. + * **Bug Fix**: Fix HeadObject to return types.Nound when an object does not exist. 
Fixes [2084](https://github.com/aws/aws-sdk-go-v2/issues/2084) +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.33.0](service/securityhub/CHANGELOG.md#v1330-2023-06-13) + * **Feature**: Add support for Security Hub Automation Rules +* `github.com/aws/aws-sdk-go-v2/service/simspaceweaver`: [v1.3.0](service/simspaceweaver/CHANGELOG.md#v130-2023-06-13) + * **Feature**: This release fixes using aws-us-gov ARNs in API calls and adds documentation for snapshot APIs. +* `github.com/aws/aws-sdk-go-v2/service/verifiedpermissions`: [v1.0.0](service/verifiedpermissions/CHANGELOG.md#v100-2023-06-13) + * **Release**: New AWS service client module + * **Feature**: GA release of Amazon Verified Permissions. +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.35.0](service/wafv2/CHANGELOG.md#v1350-2023-06-13) + * **Feature**: You can now detect and block fraudulent account creation attempts with the new AWS WAF Fraud Control account creation fraud prevention (ACFP) managed rule group AWSManagedRulesACFPRuleSet. +* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.21.0](service/wellarchitected/CHANGELOG.md#v1210-2023-06-13) + * **Feature**: AWS Well-Architected now supports Profiles that help customers prioritize which questions to focus on first by providing a list of prioritized questions that are better aligned with their business goals and outcomes. + +# Release (2023-06-12) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/amplifyuibuilder`: [v1.11.0](service/amplifyuibuilder/CHANGELOG.md#v1110-2023-06-12) + * **Feature**: AWS Amplify UIBuilder is launching Codegen UI, a new feature that enables you to generate your amplify uibuilder components and forms. +* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.19.8](service/dynamodb/CHANGELOG.md#v1198-2023-06-12) + * **Documentation**: Documentation updates for DynamoDB +* `github.com/aws/aws-sdk-go-v2/service/dynamodbstreams`: [v1.14.12](service/dynamodbstreams/CHANGELOG.md#v11412-2023-06-12) + * **Documentation**: Documentation updates for DynamoDB Streams +* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.29.0](service/fsx/CHANGELOG.md#v1290-2023-06-12) + * **Feature**: Amazon FSx for NetApp ONTAP now supports joining a storage virtual machine (SVM) to Active Directory after the SVM has been created. +* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.18.0](service/opensearch/CHANGELOG.md#v1180-2023-06-12) + * **Feature**: This release adds support for SkipUnavailable connection property for cross cluster search +* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.29.0](service/rekognition/CHANGELOG.md#v1290-2023-06-12) + * **Feature**: This release adds support for improved accuracy with user vector in Amazon Rekognition Face Search. Adds new APIs: AssociateFaces, CreateUser, DeleteUser, DisassociateFaces, ListUsers, SearchUsers, SearchUsersByImage. Also adds new face metadata that can be stored: user vector. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.84.0](service/sagemaker/CHANGELOG.md#v1840-2023-06-12) + * **Feature**: Sagemaker Neo now supports compilation for inferentia2 (ML_INF2) and Trainium1 (ML_TRN1) as available targets. With these devices, you can run your workloads at highest performance with lowest cost. 
inferentia2 (ML_INF2) is available in CMH and Trainium1 (ML_TRN1) is available in IAD currently + +# Release (2023-06-09) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.21.13](service/acmpca/CHANGELOG.md#v12113-2023-06-09) + * **Documentation**: Document-only update to refresh CLI documentation for AWS Private CA. No change to the service. +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.58.0](service/connect/CHANGELOG.md#v1580-2023-06-09) + * **Feature**: This release adds search APIs for Prompts, Quick Connects and Hours of Operations, which can be used to search for those resources within a Connect Instance. + +# Release (2023-06-08) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.30.0](service/athena/CHANGELOG.md#v1300-2023-06-08) + * **Feature**: You can now define custom spark properties at start of the session for use cases like cluster encryption, table formats, and general Spark tuning. +* `github.com/aws/aws-sdk-go-v2/service/comprehendmedical`: [v1.16.0](service/comprehendmedical/CHANGELOG.md#v1160-2023-06-08) + * **Feature**: This release supports a new set of entities and traits. +* `github.com/aws/aws-sdk-go-v2/service/paymentcryptography`: [v1.0.0](service/paymentcryptography/CHANGELOG.md#v100-2023-06-08) + * **Release**: New AWS service client module + * **Feature**: Initial release of AWS Payment Cryptography Control Plane service for creating and managing cryptographic keys used during card payment processing. +* `github.com/aws/aws-sdk-go-v2/service/paymentcryptographydata`: [v1.0.0](service/paymentcryptographydata/CHANGELOG.md#v100-2023-06-08) + * **Release**: New AWS service client module + * **Feature**: Initial release of AWS Payment Cryptography DataPlane Plane service for performing cryptographic operations typically used during card payment processing. +* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.19.0](service/servicecatalog/CHANGELOG.md#v1190-2023-06-08) + * **Feature**: New parameter added in ServiceCatalog DescribeProvisioningArtifact api - IncludeProvisioningArtifactParameters. This parameter can be used to return information about the parameters used to provision the product +* `github.com/aws/aws-sdk-go-v2/service/timestreamwrite`: [v1.17.0](service/timestreamwrite/CHANGELOG.md#v1170-2023-06-08) + * **Feature**: This release adds the capability for customers to define how their data should be partitioned, optimizing for certain access patterns. This definition will take place as a part of the table creation. + +# Release (2023-06-07) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.29.0](service/cloudformation/CHANGELOG.md#v1290-2023-06-07) + * **Feature**: AWS CloudFormation StackSets is updating the deployment experience for all stackset operations to skip suspended AWS accounts during deployments. StackSets will skip target AWS accounts that are suspended and set the Detailed Status of the corresponding stack instances as SKIPPED_SUSPENDED_ACCOUNT +* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.21.0](service/cloudwatchlogs/CHANGELOG.md#v1210-2023-06-07) + * **Feature**: This change adds support for account level data protection policies using 3 new APIs, PutAccountPolicy, DeleteAccountPolicy and DescribeAccountPolicy. DescribeLogGroup API has been modified to indicate if account level policy is applied to the LogGroup via "inheritedProperties" list in the response. 
+* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.25.0](service/customerprofiles/CHANGELOG.md#v1250-2023-06-07)
+  * **Feature**: This release introduces event stream related APIs.
+* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.18.15](service/directconnect/CHANGELOG.md#v11815-2023-06-07)
+  * **Documentation**: This update corrects the jumbo frames mtu values from 9100 to 8500 for transit virtual interfaces.
+* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.19.0](service/emrcontainers/CHANGELOG.md#v1190-2023-06-07)
+  * **Feature**: EMR on EKS adds support for log rotation of Spark container logs with EMR-6.11.0 onwards, to the StartJobRun API.
+* `github.com/aws/aws-sdk-go-v2/service/iotdeviceadvisor`: [v1.19.0](service/iotdeviceadvisor/CHANGELOG.md#v1190-2023-06-07)
+  * **Feature**: AWS IoT Core Device Advisor now supports new Qualification Suite test case list. With this update, customers can more easily create new qualification test suite with an empty rootGroup input.
+
+# Release (2023-06-06)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.57.1](service/connect/CHANGELOG.md#v1571-2023-06-06)
+  * **Documentation**: GetMetricDataV2 API is now available in AWS GovCloud(US) region.
+* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.26.0](service/emr/CHANGELOG.md#v1260-2023-06-06)
+  * **Feature**: This release provides customers the ability to specify an allocation strategy amongst PRICE_CAPACITY_OPTIMIZED, CAPACITY_OPTIMIZED, LOWEST_PRICE, DIVERSIFIED for Spot instances in Instance Fleet cluster. This enables customers to choose an allocation strategy best suited for their workload.
+* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.20.0](service/iam/CHANGELOG.md#v1200-2023-06-06)
+  * **Feature**: This release updates the AccountAlias regex pattern with the same length restrictions enforced by the length constraint.
+* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.14.0](service/inspector2/CHANGELOG.md#v1140-2023-06-06)
+  * **Feature**: Adds new response properties and request parameters for 'last scanned at' on the ListCoverage operation. This feature allows you to search and view the date of which your resources were last scanned by Inspector.
+* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.38.0](service/iot/CHANGELOG.md#v1380-2023-06-06)
+  * **Feature**: Adding IoT Device Management Software Package Catalog APIs to register, store, and report system software packages, along with their versions and metadata in a centralized location.
+* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.29.0](service/lexmodelsv2/CHANGELOG.md#v1290-2023-06-06)
+  * **Feature**: This release adds support for Lex Developers to create test sets and to execute those test-sets against their bots.
+* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.37.0](service/quicksight/CHANGELOG.md#v1370-2023-06-06)
+  * **Feature**: QuickSight support for pivot table field collapse state, radar chart range scale and multiple scope options in conditional formatting.
+* `github.com/aws/aws-sdk-go-v2/service/signer`: [v1.15.0](service/signer/CHANGELOG.md#v1150-2023-06-06)
+  * **Feature**: AWS Signer is launching Container Image Signing, a new feature that enables you to sign and verify container images. This feature enables you to validate that only container images you approve are used in your enterprise.
+* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.23.0](service/sqs/CHANGELOG.md#v1230-2023-06-06) + * **Feature**: Amazon SQS adds three new APIs - StartMessageMoveTask, CancelMessageMoveTask, and ListMessageMoveTasks to automate redriving messages from dead-letter queues to source queues or a custom destination. + +# Release (2023-06-05) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.28.0](service/cloudformation/CHANGELOG.md#v1280-2023-06-05) + * **Feature**: AWS CloudFormation StackSets provides customers with three new APIs to activate, deactivate, and describe AWS Organizations trusted access which is needed to get started with service-managed StackSets. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.99.0](service/ec2/CHANGELOG.md#v1990-2023-06-05) + * **Feature**: Making InstanceTagAttribute as the required parameter for the DeregisterInstanceEventNotificationAttributes and RegisterInstanceEventNotificationAttributes APIs. +* `github.com/aws/aws-sdk-go-v2/service/finspace`: [v1.10.0](service/finspace/CHANGELOG.md#v1100-2023-06-05) + * **Feature**: Releasing new Managed kdb Insights APIs +* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.25.0](service/frauddetector/CHANGELOG.md#v1250-2023-06-05) + * **Feature**: Added new variable types, new DateTime data type, and new rules engine functions for interacting and working with DateTime data types. +* `github.com/aws/aws-sdk-go-v2/service/keyspaces`: [v1.3.0](service/keyspaces/CHANGELOG.md#v130-2023-06-05) + * **Feature**: This release adds support for MRR GA launch, and includes multiregion support in create-keyspace, get-keyspace, and list-keyspace. +* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.22.0](service/kms/CHANGELOG.md#v1220-2023-06-05) + * **Feature**: This release includes feature to import customer's asymmetric (RSA and ECC) and HMAC keys into KMS. It also includes feature to allow customers to specify number of days to schedule a KMS key deletion as a policy condition key. +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.35.0](service/lambda/CHANGELOG.md#v1350-2023-06-05) + * **Feature**: Add Ruby 3.2 (ruby3.2) Runtime support to AWS Lambda. +* `github.com/aws/aws-sdk-go-v2/service/mwaa`: [v1.16.0](service/mwaa/CHANGELOG.md#v1160-2023-06-05) + * **Feature**: This release adds ROLLING_BACK and CREATING_SNAPSHOT environment statuses for Amazon MWAA environments. + +# Release (2023-06-02) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.29.0](service/athena/CHANGELOG.md#v1290-2023-06-02) + * **Feature**: This release introduces the DeleteCapacityReservation API and the ability to manage capacity reservations using CloudFormation +* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.26.0](service/cloudtrail/CHANGELOG.md#v1260-2023-06-02) + * **Feature**: This feature allows users to start and stop event ingestion on a CloudTrail Lake event data store. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.83.0](service/sagemaker/CHANGELOG.md#v1830-2023-06-02) + * **Feature**: This release adds Selective Execution feature that allows SageMaker Pipelines users to run selected steps in a pipeline. +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.34.0](service/wafv2/CHANGELOG.md#v1340-2023-06-02) + * **Feature**: Added APIs to describe managed products. The APIs retrieve information about rule groups that are managed by AWS and by AWS Marketplace sellers. 
+ +# Release (2023-06-01) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/alexaforbusiness`: [v1.15.11](service/alexaforbusiness/CHANGELOG.md#v11511-2023-06-01) + * **Documentation**: Alexa for Business has been deprecated and is no longer supported. +* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.30.0](service/appflow/CHANGELOG.md#v1300-2023-06-01) + * **Feature**: Added ability to select DataTransferApiType for DescribeConnector and CreateFlow requests when using Async supported connectors. Added supportedDataTransferType to DescribeConnector/DescribeConnectors/ListConnector response. +* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.24.0](service/customerprofiles/CHANGELOG.md#v1240-2023-06-01) + * **Feature**: This release introduces calculated attribute related APIs. +* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.22.0](service/ivs/CHANGELOG.md#v1220-2023-06-01) + * **Feature**: API Update for IVS Advanced Channel type +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.82.1](service/sagemaker/CHANGELOG.md#v1821-2023-06-01) + * **Documentation**: Amazon Sagemaker Autopilot adds support for Parquet file input to NLP text classification jobs. +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.33.1](service/wafv2/CHANGELOG.md#v1331-2023-06-01) + * **Documentation**: Corrected the information for the header order FieldToMatch setting + +# Release (2023-05-31) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.33.0](service/configservice/CHANGELOG.md#v1330-2023-05-31) + * **Feature**: Resource Types Exclusion feature launch by AWS Config +* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.24.0](service/frauddetector/CHANGELOG.md#v1240-2023-05-31) + * **Feature**: This release enables publishing event predictions from Amazon Fraud Detector (AFD) to Amazon EventBridge. For example, after getting predictions from AFD, Amazon EventBridge rules can be configured to trigger notification through an SNS topic, send a message with SES, or trigger Lambda workflows. +* `github.com/aws/aws-sdk-go-v2/service/healthlake`: [v1.16.0](service/healthlake/CHANGELOG.md#v1160-2023-05-31) + * **Feature**: This release adds a new request parameter to the CreateFHIRDatastore API operation. IdentityProviderConfiguration specifies how you want to authenticate incoming requests to your Healthlake Data Store. +* `github.com/aws/aws-sdk-go-v2/service/m2`: [v1.5.0](service/m2/CHANGELOG.md#v150-2023-05-31) + * **Feature**: Adds an optional create-only 'roleArn' property to Application resources. Enables PS and PO data set org types. +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.45.0](service/rds/CHANGELOG.md#v1450-2023-05-31) + * **Feature**: This release adds support for changing the engine for Oracle using the ModifyDbInstance API +* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.18.5](service/servicecatalog/CHANGELOG.md#v1185-2023-05-31) + * **Documentation**: Documentation updates for ServiceCatalog. +* `github.com/aws/aws-sdk-go-v2/service/workspacesweb`: [v1.10.0](service/workspacesweb/CHANGELOG.md#v1100-2023-05-31) + * **Feature**: WorkSpaces Web now allows you to control which IP addresses your WorkSpaces Web portal may be accessed from. 
+ +# Release (2023-05-30) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/chimesdkvoice`: [v1.6.0](service/chimesdkvoice/CHANGELOG.md#v160-2023-05-30) + * **Feature**: Added optional CallLeg field to StartSpeakerSearchTask API request +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.50.0](service/glue/CHANGELOG.md#v1500-2023-05-30) + * **Feature**: Added Runtime parameter to allow selection of Ray Runtime +* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.18.3](service/groundstation/CHANGELOG.md#v1183-2023-05-30) + * **Documentation**: Updating description of GetMinuteUsage to be clearer. +* `github.com/aws/aws-sdk-go-v2/service/iotfleetwise`: [v1.4.0](service/iotfleetwise/CHANGELOG.md#v140-2023-05-30) + * **Feature**: Campaigns now support selecting Timestream or S3 as the data destination, Signal catalogs now support "Deprecation" keyword released in VSS v2.1 and "Comment" keyword released in VSS v3.0 +* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.23.0](service/location/CHANGELOG.md#v1230-2023-05-30) + * **Feature**: This release adds API support for political views for the maps service APIs: CreateMap, UpdateMap, DescribeMap. +* `github.com/aws/aws-sdk-go-v2/service/memorydb`: [v1.13.0](service/memorydb/CHANGELOG.md#v1130-2023-05-30) + * **Feature**: Amazon MemoryDB for Redis now supports AWS Identity and Access Management authentication access to Redis clusters starting with redis-engine version 7.0 +* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.24.0](service/personalize/CHANGELOG.md#v1240-2023-05-30) + * **Feature**: This release provides support for the exclusion of certain columns for training when creating a solution and creating or updating a recommender with Amazon Personalize. +* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.26.0](service/polly/CHANGELOG.md#v1260-2023-05-30) + * **Feature**: Amazon Polly adds 2 new voices - Sofie (da-DK) and Niamh (en-IE) +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.32.0](service/securityhub/CHANGELOG.md#v1320-2023-05-30) + * **Feature**: Added new resource detail objects to ASFF, including resources for AwsGuardDutyDetector, AwsAmazonMqBroker, AwsEventSchemasRegistry, AwsAppSyncGraphQlApi and AwsStepFunctionStateMachine. +* `github.com/aws/aws-sdk-go-v2/service/securitylake`: [v1.4.0](service/securitylake/CHANGELOG.md#v140-2023-05-30) + * **Feature**: Log sources are now versioned. AWS log sources and custom sources will now come with a version identifier that enables producers to vend multiple schema versions to subscribers. Security Lake API have been refactored to more closely align with AWS API conventions. +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.33.0](service/wafv2/CHANGELOG.md#v1330-2023-05-30) + * **Feature**: This SDK release provides customers the ability to use Header Order as a field to match. + +# Release (2023-05-26) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.57.0](service/connect/CHANGELOG.md#v1570-2023-05-26) + * **Feature**: Documentation update for a new Initiation Method value in DescribeContact API +* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.28.0](service/iotwireless/CHANGELOG.md#v1280-2023-05-26) + * **Feature**: Add Multicast Group support in Network Analyzer Configuration. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.82.0](service/sagemaker/CHANGELOG.md#v1820-2023-05-26) + * **Feature**: Added ml.p4d and ml.inf1 as supported instance type families for SageMaker Notebook Instances. 
+ +# Release (2023-05-25) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.21.0](service/applicationautoscaling/CHANGELOG.md#v1210-2023-05-25) + * **Feature**: With this release, ElastiCache customers will be able to use predefined metricType "ElastiCacheDatabaseCapacityUsageCountedForEvictPercentage" for their ElastiCache instances. +* `github.com/aws/aws-sdk-go-v2/service/codepipeline`: [v1.15.0](service/codepipeline/CHANGELOG.md#v1150-2023-05-25) + * **Feature**: Add PollingDisabledAt time information in PipelineMetadata object of GetPipeline API. +* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.19.0](service/gamelift/CHANGELOG.md#v1190-2023-05-25) + * **Feature**: GameLift FleetIQ users can now filter game server claim requests to exclude servers on instances that are draining. +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.49.0](service/glue/CHANGELOG.md#v1490-2023-05-25) + * **Feature**: Added ability to create data quality rulesets for shared, cross-account Glue Data Catalog tables. Added support for dataset comparison rules through a new parameter called AdditionalDataSources. Enhanced the data quality results with a map containing profiled metric values. +* `github.com/aws/aws-sdk-go-v2/service/migrationhubrefactorspaces`: [v1.10.0](service/migrationhubrefactorspaces/CHANGELOG.md#v1100-2023-05-25) + * **Feature**: This SDK update allows for path parameter syntax to be passed to the CreateRoute API. Path parameter syntax require parameters to be enclosed in {} characters. This update also includes a new AppendSourcePath field which lets users forward the source path to the Service URL endpoint. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.81.0](service/sagemaker/CHANGELOG.md#v1810-2023-05-25) + * **Feature**: Amazon SageMaker Automatic Model Tuning now supports enabling Autotune for tuning jobs which can choose tuning job configurations. + +# Release (2023-05-24) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.21.0](service/appsync/CHANGELOG.md#v1210-2023-05-24) + * **Feature**: This release introduces AppSync Merged APIs, which provide the ability to compose multiple source APIs into a single federated/merged API. +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.56.0](service/connect/CHANGELOG.md#v1560-2023-05-24) + * **Feature**: Amazon Connect Evaluation Capabilities: validation improvements +* `github.com/aws/aws-sdk-go-v2/service/costandusagereportservice`: [v1.16.0](service/costandusagereportservice/CHANGELOG.md#v1160-2023-05-24) + * **Feature**: Add support for split cost allocation data on a report. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.80.0](service/sagemaker/CHANGELOG.md#v1800-2023-05-24) + * **Feature**: SageMaker now provides an instantaneous deployment recommendation through the DescribeModel API + +# Release (2023-05-23) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.24.0](service/fms/CHANGELOG.md#v1240-2023-05-23) + * **Feature**: Fixes issue that could cause calls to GetAdminScope and ListAdminAccountsForOrganization to return a 500 Internal Server error. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.79.0](service/sagemaker/CHANGELOG.md#v1790-2023-05-23) + * **Feature**: Added ModelNameEquals, ModelPackageVersionArnEquals in request and ModelName, SamplePayloadUrl, ModelPackageVersionArn in response of ListInferenceRecommendationsJobs API. 
Added Invocation timestamps in response of DescribeInferenceRecommendationsJob API & ListInferenceRecommendationsJobSteps API. +* `github.com/aws/aws-sdk-go-v2/service/translate`: [v1.18.0](service/translate/CHANGELOG.md#v1180-2023-05-23) + * **Feature**: Added support for calling TranslateDocument API. + +# Release (2023-05-22) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.22.0](service/backup/CHANGELOG.md#v1220-2023-05-22) + * **Feature**: Added support for tags on restore. +* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.19.2](service/pinpoint/CHANGELOG.md#v1192-2023-05-22) + * **Documentation**: Amazon Pinpoint is deprecating the tags parameter in the UpdateSegment, UpdateCampaign, UpdateEmailTemplate, UpdateSmsTemplate, UpdatePushTemplate, UpdateInAppTemplate and UpdateVoiceTemplate. Amazon Pinpoint will end support tags parameter by May 22, 2023. +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.36.0](service/quicksight/CHANGELOG.md#v1360-2023-05-22) + * **Feature**: Add support for Asset Bundle, Geospatial Heatmaps. + +# Release (2023-05-19) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.21.0](service/backup/CHANGELOG.md#v1210-2023-05-19) + * **Feature**: Add ResourceArn, ResourceType, and BackupVaultName to ListRecoveryPointsByLegalHold API response. +* `github.com/aws/aws-sdk-go-v2/service/connectcases`: [v1.4.0](service/connectcases/CHANGELOG.md#v140-2023-05-19) + * **Feature**: This release adds the ability to create fields with type Url through the CreateField API. For more information see https://docs.aws.amazon.com/cases/latest/APIReference/Welcome.html +* `github.com/aws/aws-sdk-go-v2/service/mediapackagev2`: [v1.0.0](service/mediapackagev2/CHANGELOG.md#v100-2023-05-19) + * **Release**: New AWS service client module + * **Feature**: Adds support for the MediaPackage Live v2 API +* `github.com/aws/aws-sdk-go-v2/service/sesv2`: [v1.18.0](service/sesv2/CHANGELOG.md#v1180-2023-05-19) + * **Feature**: This release allows customers to update scaling mode property of dedicated IP pools with PutDedicatedIpPoolScalingAttributes call. + +# Release (2023-05-18) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.28.0](service/athena/CHANGELOG.md#v1280-2023-05-18) + * **Feature**: Removing SparkProperties from EngineConfiguration object for StartSession API call +* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.25.0](service/cloudtrail/CHANGELOG.md#v1250-2023-05-18) + * **Feature**: Add ConflictException to PutEventSelectors, add (Channel/EDS)ARNInvalidException to Tag APIs. These exceptions provide customers with more specific error messages instead of internal errors. +* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.24.0](service/computeoptimizer/CHANGELOG.md#v1240-2023-05-18) + * **Feature**: In this launch, we add support for showing integration status with external metric providers such as Instana, Datadog ...etc in GetEC2InstanceRecommendations and ExportEC2InstanceRecommendations apis +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.55.0](service/connect/CHANGELOG.md#v1550-2023-05-18) + * **Feature**: You can programmatically create and manage prompts using APIs, for example, to extract prompts stored within Amazon Connect and add them to your Amazon S3 bucket. AWS CloudTrail, AWS CloudFormation and tagging are supported. 
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.98.0](service/ec2/CHANGELOG.md#v1980-2023-05-18) + * **Feature**: Add support for i4g.large, i4g.xlarge, i4g.2xlarge, i4g.4xlarge, i4g.8xlarge and i4g.16xlarge instances powered by AWS Graviton2 processors that deliver up to 15% better compute performance than our other storage-optimized instances. +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.27.1](service/ecs/CHANGELOG.md#v1271-2023-05-18) + * **Documentation**: Documentation only release to address various tickets. +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.37.0](service/mediaconvert/CHANGELOG.md#v1370-2023-05-18) + * **Feature**: This release introduces a new MXF Profile for XDCAM which is strictly compliant with the SMPTE RDD 9 standard and improved handling of output name modifiers. +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.44.1](service/rds/CHANGELOG.md#v1441-2023-05-18) + * **Documentation**: RDS documentation update for the EngineVersion parameter of ModifyDBSnapshot +* `github.com/aws/aws-sdk-go-v2/service/sagemakergeospatial`: [v1.3.0](service/sagemakergeospatial/CHANGELOG.md#v130-2023-05-18) + * **Feature**: This release makes ExecutionRoleArn a required field in the StartEarthObservationJob API. + +# Release (2023-05-16) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/detective`: [v1.19.0](service/detective/CHANGELOG.md#v1190-2023-05-16) + * **Feature**: Added and updated API operations in Detective to support the integration of ASFF Security Hub findings. +* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.18.14](service/directconnect/CHANGELOG.md#v11814-2023-05-16) + * **Documentation**: This release includes an update to the mtu value for CreateTransitVirtualInterface from 9001 mtu to 8500 mtu. +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.48.0](service/glue/CHANGELOG.md#v1480-2023-05-16) + * **Feature**: Add Support for Tags for Custom Entity Types +* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.19.8](service/secretsmanager/CHANGELOG.md#v1198-2023-05-16) + * **Documentation**: Documentation updates for Secrets Manager +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.32.0](service/wafv2/CHANGELOG.md#v1320-2023-05-16) + * **Feature**: My AWS Service (placeholder) - You can now rate limit web requests based on aggregation keys other than IP addresses, and you can aggregate using combinations of keys. You can also rate limit all requests that match a scope-down statement, without further aggregation. + +# Release (2023-05-15) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.27.0](service/athena/CHANGELOG.md#v1270-2023-05-15) + * **Feature**: You can now define custom spark properties at start of the session for use cases like cluster encryption, table formats, and general Spark tuning. +* `github.com/aws/aws-sdk-go-v2/service/codecatalyst`: [v1.3.0](service/codecatalyst/CHANGELOG.md#v130-2023-05-15) + * **Feature**: With this release, the users can list the active sessions connected to their Dev Environment on AWS CodeCatalyst +* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.28.0](service/rekognition/CHANGELOG.md#v1280-2023-05-15) + * **Feature**: This release adds a new EyeDirection attribute in Amazon Rekognition DetectFaces and IndexFaces APIs which predicts the yaw and pitch angles of a person's eye gaze direction for each face detected in the image. 
+* `github.com/aws/aws-sdk-go-v2/service/rolesanywhere`: [v1.2.0](service/rolesanywhere/CHANGELOG.md#v120-2023-05-15) + * **Feature**: Adds support for custom notification settings in a trust anchor. Introduces PutNotificationSettings and ResetNotificationSettings API's. Updates DurationSeconds max value to 3600. +* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.29.0](service/transfer/CHANGELOG.md#v1290-2023-05-15) + * **Feature**: This release introduces the ability to require both password and SSH key when users authenticate to your Transfer Family servers that use the SFTP protocol. + +# Release (2023-05-11) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.54.2](service/connect/CHANGELOG.md#v1542-2023-05-11) + * **Documentation**: This release updates GetMetricDataV2 API, to support metric data up-to last 35 days +* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.27.0](service/elasticache/CHANGELOG.md#v1270-2023-05-11) + * **Feature**: Added support to modify the cluster mode configuration for the existing ElastiCache ReplicationGroups. Customers can now modify the configuration from cluster mode disabled to cluster mode enabled. +* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.19.0](service/elasticsearchservice/CHANGELOG.md#v1190-2023-05-11) + * **Feature**: This release fixes DescribePackages API error with null filter value parameter. +* `github.com/aws/aws-sdk-go-v2/service/health`: [v1.17.0](service/health/CHANGELOG.md#v1170-2023-05-11) + * **Feature**: Add support for regional endpoints +* `github.com/aws/aws-sdk-go-v2/service/ivsrealtime`: [v1.2.0](service/ivsrealtime/CHANGELOG.md#v120-2023-05-11) + * **Feature**: Add methods for inspecting and debugging stages: ListStageSessions, GetStageSession, ListParticipants, GetParticipant, and ListParticipantEvents. +* `github.com/aws/aws-sdk-go-v2/service/omics`: [v1.4.0](service/omics/CHANGELOG.md#v140-2023-05-11) + * **Feature**: This release provides support for Ready2Run and GPU workflows, an improved read set filter, the direct upload of read sets into Omics Storage, and annotation parsing for analytics stores. +* `github.com/aws/aws-sdk-go-v2/service/support`: [v1.15.0](service/support/CHANGELOG.md#v1150-2023-05-11) + * **Feature**: This release adds 2 new Support APIs, DescribeCreateCaseOptions and DescribeSupportedLanguages. You can use these new APIs to get available support languages. + +# Release (2023-05-10) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.25.0](service/emr/CHANGELOG.md#v1250-2023-05-10) + * **Feature**: EMR Studio now supports programmatically executing a Notebooks on an EMR on EKS cluster. In addition, notebooks can now be executed by specifying its location in S3. +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.44.0](service/rds/CHANGELOG.md#v1440-2023-05-10) + * **Feature**: Amazon Relational Database Service (RDS) updates for the new Aurora I/O-Optimized storage type for Amazon Aurora DB clusters +* `github.com/aws/aws-sdk-go-v2/service/swf`: [v1.15.0](service/swf/CHANGELOG.md#v1150-2023-05-10) + * **Feature**: This release adds a new API parameter to exclude old history events from decision tasks. 
+ +# Release (2023-05-09) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.20.0](service/applicationautoscaling/CHANGELOG.md#v1200-2023-05-09) + * **Feature**: With this release, Amazon SageMaker Serverless Inference customers can use Application Auto Scaling to auto scale the provisioned concurrency of their serverless endpoints. +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.47.0](service/glue/CHANGELOG.md#v1470-2023-05-09) + * **Feature**: This release adds AmazonRedshift Source and Target nodes in addition to DynamicTransform OutputSchemas +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.78.0](service/sagemaker/CHANGELOG.md#v1780-2023-05-09) + * **Feature**: This release includes support for (1) Provisioned Concurrency for Amazon SageMaker Serverless Inference and (2) UpdateEndpointWeightsAndCapacities API for Serverless endpoints. + +# Release (2023-05-08) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.46.0](service/glue/CHANGELOG.md#v1460-2023-05-08) + * **Feature**: Support large worker types G.4x and G.8x for Glue Spark +* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.23.0](service/guardduty/CHANGELOG.md#v1230-2023-05-08) + * **Feature**: Add AccessDeniedException 403 Error message code to support 3 Tagging related APIs +* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.29.0](service/iotsitewise/CHANGELOG.md#v1290-2023-05-08) + * **Feature**: Provide support for 20,000 max results for GetAssetPropertyValueHistory/BatchGetAssetPropertyValueHistory and 15 minute aggregate resolution for GetAssetPropertyAggregates/BatchGetAssetPropertyAggregates +* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.19.0](service/sts/CHANGELOG.md#v1190-2023-05-08) + * **Feature**: Documentation updates for AWS Security Token Service. + +# Release (2023-05-05) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.97.0](service/ec2/CHANGELOG.md#v1970-2023-05-05) + * **Feature**: This release adds support the inf2 and trn1n instances. inf2 instances are purpose built for deep learning inference while trn1n instances are powered by AWS Trainium accelerators and they build on the capabilities of Trainium-powered trn1 instances. +* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.13.0](service/inspector2/CHANGELOG.md#v1130-2023-05-05) + * **Feature**: Amazon Inspector now allows customers to search its vulnerability intelligence database if any of the Inspector scanning types are activated. +* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.23.0](service/mediatailor/CHANGELOG.md#v1230-2023-05-05) + * **Feature**: This release adds support for AFTER_LIVE_EDGE mode configuration for avail suppression, and adding a fill-policy setting that sets the avail suppression to PARTIAL_AVAIL or FULL_AVAIL_ONLY when AFTER_LIVE_EDGE is enabled. +* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.22.0](service/sqs/CHANGELOG.md#v1220-2023-05-05) + * **Feature**: Revert previous SQS protocol change. 
+ +# Release (2023-05-04) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.26.0](service/cloudwatch/CHANGELOG.md#v1260-2023-05-04) + * **Feature**: Adds support for filtering by metric names in CloudWatch Metric Streams. +* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.32.0](service/configservice/CHANGELOG.md#v1320-2023-05-04) + * **Feature**: Updated ResourceType enum with new resource types onboarded by AWS Config in April 2023. +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.54.1](service/connect/CHANGELOG.md#v1541-2023-05-04) + * **Documentation**: Remove unused InvalidParameterException from CreateParticipant API +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.27.0](service/ecs/CHANGELOG.md#v1270-2023-05-04) + * **Feature**: Documentation update for new error type NamespaceNotFoundException for CreateCluster and UpdateCluster +* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.28.0](service/networkfirewall/CHANGELOG.md#v1280-2023-05-04) + * **Feature**: This release adds support for the Suricata REJECT option in midstream exception configurations. +* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.17.0](service/opensearch/CHANGELOG.md#v1170-2023-05-04) + * **Feature**: DescribeDomainNodes: A new API that provides configuration information for nodes part of the domain +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.35.0](service/quicksight/CHANGELOG.md#v1350-2023-05-04) + * **Feature**: Add support for Topic, Dataset parameters and VPC +* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.27.0](service/rekognition/CHANGELOG.md#v1270-2023-05-04) + * **Feature**: This release adds a new attribute FaceOccluded. Additionally, you can now select attributes individually (e.g. ["DEFAULT", "FACE_OCCLUDED", "AGE_RANGE"] instead of ["ALL"]), which can reduce response time. +* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.33.1](service/s3/CHANGELOG.md#v1331-2023-05-04) + * **Documentation**: Documentation updates for Amazon S3 +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.77.0](service/sagemaker/CHANGELOG.md#v1770-2023-05-04) + * **Feature**: We added support for ml.inf2 and ml.trn1 family of instances on Amazon SageMaker for deploying machine learning (ML) models for Real-time and Asynchronous inference. You can use these instances to achieve high performance at a low cost for generative artificial intelligence (AI) models. +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.31.0](service/securityhub/CHANGELOG.md#v1310-2023-05-04) + * **Feature**: Add support for Finding History. +* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.21.0](service/sqs/CHANGELOG.md#v1210-2023-05-04) + * **Feature**: This release enables customers to call SQS using AWS JSON-1.0 protocol. + +# Release (2023-05-03) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.20.0](service/appsync/CHANGELOG.md#v1200-2023-05-03) + * **Feature**: Private API support for AWS AppSync. With Private APIs, you can now create GraphQL APIs that can only be accessed from your Amazon Virtual Private Cloud ("VPC"). 
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.96.0](service/ec2/CHANGELOG.md#v1960-2023-05-03) + * **Feature**: Adds an SDK paginator for GetNetworkInsightsAccessScopeAnalysisFindings +* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.12.0](service/inspector2/CHANGELOG.md#v1120-2023-05-03) + * **Feature**: This feature provides deep inspection for linux based instance +* `github.com/aws/aws-sdk-go-v2/service/iottwinmaker`: [v1.12.0](service/iottwinmaker/CHANGELOG.md#v1120-2023-05-03) + * **Feature**: This release adds a field for GetScene API to return error code and message from dependency services. +* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.27.0](service/networkfirewall/CHANGELOG.md#v1270-2023-05-03) + * **Feature**: AWS Network Firewall now supports policy level HOME_NET variable overrides. +* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.16.0](service/opensearch/CHANGELOG.md#v1160-2023-05-03) + * **Feature**: Amazon OpenSearch Service adds the option to deploy a domain across multiple Availability Zones, with each AZ containing a complete copy of data and with nodes in one AZ acting as a standby. This option provides 99.99% availability and consistent performance in the event of infrastructure failure. +* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.20.0](service/wellarchitected/CHANGELOG.md#v1200-2023-05-03) + * **Feature**: This release deepens integration with AWS Service Catalog AppRegistry to improve workload resource discovery. + +# Release (2023-05-02) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.29.0](service/appflow/CHANGELOG.md#v1290-2023-05-02) + * **Feature**: This release adds new API to cancel flow executions. +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.54.0](service/connect/CHANGELOG.md#v1540-2023-05-02) + * **Feature**: Amazon Connect Service Rules API update: Added OnContactEvaluationSubmit event source to support user configuring evaluation form rules. +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.26.3](service/ecs/CHANGELOG.md#v1263-2023-05-02) + * **Documentation**: Documentation only update to address Amazon ECS tickets. +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.40.0](service/kendra/CHANGELOG.md#v1400-2023-05-02) + * **Feature**: AWS Kendra now supports configuring document fields/attributes via the GetQuerySuggestions API. You can now base query suggestions on the contents of document fields. +* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.11.0](service/resiliencehub/CHANGELOG.md#v1110-2023-05-02) + * **Feature**: This release will improve resource level transparency in applications by discovering previously hidden resources. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.76.0](service/sagemaker/CHANGELOG.md#v1760-2023-05-02) + * **Feature**: Amazon Sagemaker Autopilot supports training models with sample weights and additional objective metrics. + +# Release (2023-05-01) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.23.0](service/computeoptimizer/CHANGELOG.md#v1230-2023-05-01) + * **Feature**: support for tag filtering within compute optimizer. ability to filter recommendation results by tag and tag key value pairs. ability to filter by inferred workload type added. 
+* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.21.0](service/kms/CHANGELOG.md#v1210-2023-05-01) + * **Feature**: This release makes the NitroEnclave request parameter Recipient and the response field for CiphertextForRecipient available in AWS SDKs. It also adds the regex pattern for CloudHsmClusterId validation. + +# Release (2023-04-28) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.28.0](service/appflow/CHANGELOG.md#v1280-2023-04-28) + * **Feature**: Adds Jwt Support for Salesforce Credentials. +* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.26.0](service/athena/CHANGELOG.md#v1260-2023-04-28) + * **Feature**: You can now use capacity reservations on Amazon Athena to run SQL queries on fully-managed compute capacity. +* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.18.12](service/directconnect/CHANGELOG.md#v11812-2023-04-28) + * **Documentation**: This release corrects the jumbo frames MTU from 9100 to 8500. +* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.20.0](service/efs/CHANGELOG.md#v1200-2023-04-28) + * **Feature**: This release adds PAUSED and PAUSING state as a returned value for DescribeReplicationConfigurations response. +* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.13.0](service/grafana/CHANGELOG.md#v1130-2023-04-28) + * **Feature**: This release adds support for the grafanaVersion parameter in CreateWorkspace. +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.37.0](service/iot/CHANGELOG.md#v1370-2023-04-28) + * **Feature**: This release allows AWS IoT Core users to specify a TLS security policy when creating and updating AWS IoT Domain Configurations. +* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.26.0](service/rekognition/CHANGELOG.md#v1260-2023-04-28) + * **Feature**: Added support for aggregating moderation labels by video segment timestamps for Stored Video Content Moderation APIs and added additional information about the job to all Stored Video Get API responses. +* `github.com/aws/aws-sdk-go-v2/service/simspaceweaver`: [v1.2.0](service/simspaceweaver/CHANGELOG.md#v120-2023-04-28) + * **Feature**: Added a new CreateSnapshot API. For the StartSimulation API, SchemaS3Location is now optional, added a new SnapshotS3Location parameter. For the DescribeSimulation API, added SNAPSHOT_IN_PROGRESS simulation state, deprecated SchemaError, added new fields: StartError and SnapshotS3Location. +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.31.0](service/wafv2/CHANGELOG.md#v1310-2023-04-28) + * **Feature**: You can now associate a web ACL with a Verified Access instance. +* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.28.11](service/workspaces/CHANGELOG.md#v12811-2023-04-28) + * **Documentation**: Added Windows 11 to support Microsoft_Office_2019 + +# Release (2023-04-27) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.95.0](service/ec2/CHANGELOG.md#v1950-2023-04-27) + * **Feature**: This release adds support for AMD SEV-SNP on EC2 instances. +* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.18.0](service/emrcontainers/CHANGELOG.md#v1180-2023-04-27) + * **Feature**: This release adds GetManagedEndpointSessionCredentials, a new API that allows customers to generate an auth token to connect to a managed endpoint, enabling features such as self-hosted Jupyter notebooks for EMR on EKS. 
+* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.22.0](service/guardduty/CHANGELOG.md#v1220-2023-04-27) + * **Feature**: Added API support to initiate on-demand malware scan on specific resources. +* `github.com/aws/aws-sdk-go-v2/service/iotdeviceadvisor`: [v1.18.0](service/iotdeviceadvisor/CHANGELOG.md#v1180-2023-04-27) + * **Feature**: AWS IoT Core Device Advisor now supports MQTT over WebSocket. With this update, customers can run all three test suites of AWS IoT Core Device Advisor - qualification, custom, and long duration tests - using Signature Version 4 for MQTT over WebSocket. +* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.20.0](service/kafka/CHANGELOG.md#v1200-2023-04-27) + * **Feature**: Amazon MSK has added new APIs that allows multi-VPC private connectivity and cluster policy support for Amazon MSK clusters that simplify connectivity and access between your Apache Kafka clients hosted in different VPCs and AWS accounts and your Amazon MSK clusters. +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.34.0](service/lambda/CHANGELOG.md#v1340-2023-04-27) + * **Feature**: Add Java 17 (java17) support to AWS Lambda +* `github.com/aws/aws-sdk-go-v2/service/osis`: [v1.0.1](service/osis/CHANGELOG.md#v101-2023-04-27) + * **Documentation**: Documentation updates for OpenSearch Ingestion +* `github.com/aws/aws-sdk-go-v2/service/qldb`: [v1.15.10](service/qldb/CHANGELOG.md#v11510-2023-04-27) + * **Documentation**: Documentation updates for Amazon QLDB +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.75.0](service/sagemaker/CHANGELOG.md#v1750-2023-04-27) + * **Feature**: Added ml.p4d.24xlarge and ml.p4de.24xlarge as supported instances for SageMaker Studio + +# Release (2023-04-26) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/osis`: [v1.0.0](service/osis/CHANGELOG.md#v100-2023-04-26) + * **Release**: New AWS service client module + * **Feature**: Initial release for OpenSearch Ingestion + +# Release (2023-04-25) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.15.0](service/chimesdkmessaging/CHANGELOG.md#v1150-2023-04-25) + * **Feature**: Remove non actionable field from UpdateChannelReadMarker and DeleteChannelRequest. Add precise exceptions to DeleteChannel and DeleteStreamingConfigurations error cases. +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.53.0](service/connect/CHANGELOG.md#v1530-2023-04-25) + * **Feature**: Amazon Connect, Contact Lens Evaluation API release including ability to manage forms and to submit contact evaluations. +* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.24.0](service/datasync/CHANGELOG.md#v1240-2023-04-25) + * **Feature**: This release adds 13 new APIs to support AWS DataSync Discovery GA. +* `github.com/aws/aws-sdk-go-v2/service/directoryservice`: [v1.17.0](service/directoryservice/CHANGELOG.md#v1170-2023-04-25) + * **Feature**: New field added in AWS Managed Microsoft AD DescribeSettings response and regex pattern update for UpdateSettings value. Added length validation to RemoteDomainName. +* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.19.0](service/pinpoint/CHANGELOG.md#v1190-2023-04-25) + * **Feature**: Adds support for journey runs and querying journey execution metrics based on journey runs. Adds execution metrics to campaign activities. Updates docs for Advanced Quiet Time. 
+ +# Release (2023-04-24) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2`: v1.18.0 + * **Feature**: add recursion detection middleware to all SDK requests to avoid recursion invocation in Lambda +* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.27.0](service/appflow/CHANGELOG.md#v1270-2023-04-24) + * **Feature**: Increased the max length for RefreshToken and AuthCode from 2048 to 4096. +* `github.com/aws/aws-sdk-go-v2/service/codecatalyst`: [v1.2.5](service/codecatalyst/CHANGELOG.md#v125-2023-04-24) + * **Documentation**: Documentation updates for Amazon CodeCatalyst. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.94.0](service/ec2/CHANGELOG.md#v1940-2023-04-24) + * **Feature**: API changes to AWS Verified Access related to identity providers' information. +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.36.0](service/mediaconvert/CHANGELOG.md#v1360-2023-04-24) + * **Feature**: This release introduces a noise reduction pre-filter, linear interpolation deinterlace mode, video pass-through, updated default job settings, and expanded LC-AAC Stereo audio bitrate ranges. +* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.25.0](service/rekognition/CHANGELOG.md#v1250-2023-04-24) + * **Feature**: Added new status result to Liveness session status. +* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.28.0](service/route53/CHANGELOG.md#v1280-2023-04-24) + * **Feature**: added paginator for listResourceRecordSets +* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.33.0](service/s3/CHANGELOG.md#v1330-2023-04-24) + * **Feature**: added custom paginators for listMultipartUploads and ListObjectVersions + +# Release (2023-04-21) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.52.0](service/connect/CHANGELOG.md#v1520-2023-04-21) + * **Feature**: This release adds a new API CreateParticipant. For Amazon Connect Chat, you can use this new API to customize chat flow experiences. +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.26.1](service/ecs/CHANGELOG.md#v1261-2023-04-21) + * **Documentation**: Documentation update to address various Amazon ECS tickets. +* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.23.0](service/fms/CHANGELOG.md#v1230-2023-04-21) + * **Feature**: AWS Firewall Manager adds support for multiple administrators. You can now delegate more than one administrator per organization. + +# Release (2023-04-20) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.23.0](service/chime/CHANGELOG.md#v1230-2023-04-20) + * **Feature**: Adds support for Hindi and Thai languages and additional Amazon Transcribe parameters to the StartMeetingTranscription API. +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines`: [v1.4.0](service/chimesdkmediapipelines/CHANGELOG.md#v140-2023-04-20) + * **Feature**: This release adds support for specifying the recording file format in an S3 recording sink configuration. +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.15.0](service/chimesdkmeetings/CHANGELOG.md#v1150-2023-04-20) + * **Feature**: Adds support for Hindi and Thai languages and additional Amazon Transcribe parameters to the StartMeetingTranscription API. +* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.18.0](service/gamelift/CHANGELOG.md#v1180-2023-04-20) + * **Feature**: Amazon GameLift supports creating Builds for Windows 2016 operating system. 
+* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.21.0](service/guardduty/CHANGELOG.md#v1210-2023-04-20) + * **Feature**: This release adds support for the new Lambda Protection feature. +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.36.0](service/iot/CHANGELOG.md#v1360-2023-04-20) + * **Feature**: Support additional OTA states in GetOTAUpdate API +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.74.0](service/sagemaker/CHANGELOG.md#v1740-2023-04-20) + * **Feature**: Amazon SageMaker Canvas adds ModelRegisterSettings support for CanvasAppSettings. +* `github.com/aws/aws-sdk-go-v2/service/snowball`: [v1.19.0](service/snowball/CHANGELOG.md#v1190-2023-04-20) + * **Feature**: Adds support for Amazon S3 compatible storage. AWS Snow Family customers can now use Amazon S3 compatible storage on Snowball Edge devices. Also adds support for V3_5S. This is a refreshed AWS Snowball Edge Storage Optimized device type with 210TB SSD (customer usable). +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.30.0](service/wafv2/CHANGELOG.md#v1300-2023-04-20) + * **Feature**: You can now create encrypted API keys to use in a client application integration of the JavaScript CAPTCHA API . You can also retrieve a list of your API keys and the JavaScript application integration URL. + +# Release (2023-04-19) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.24.0](service/comprehend/CHANGELOG.md#v1240-2023-04-19) + * **Feature**: This release supports native document models for custom classification, in addition to plain-text models. You train native document models using documents (PDF, Word, images) in their native format. +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.26.0](service/ecs/CHANGELOG.md#v1260-2023-04-19) + * **Feature**: This release supports the Account Setting "TagResourceAuthorization" that allows for enhanced Tagging security controls. +* `github.com/aws/aws-sdk-go-v2/service/ram`: [v1.18.0](service/ram/CHANGELOG.md#v1180-2023-04-19) + * **Feature**: This release adds support for customer managed permissions. Customer managed permissions enable customers to author and manage tailored permissions for resources shared using RAM. +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.43.1](service/rds/CHANGELOG.md#v1431-2023-04-19) + * **Documentation**: Adds support for the ImageId parameter of CreateCustomDBEngineVersion to RDS Custom for Oracle +* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.32.0](service/s3/CHANGELOG.md#v1320-2023-04-19) + * **Feature**: Provides support for "Snow" Storage class. +* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.19.4](service/secretsmanager/CHANGELOG.md#v1194-2023-04-19) + * **Documentation**: Documentation updates for Secrets Manager + +# Release (2023-04-17) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.26.0](service/appflow/CHANGELOG.md#v1260-2023-04-17) + * **Feature**: This release adds a Client Token parameter to the following AppFlow APIs: Create/Update Connector Profile, Create/Update Flow, Start Flow, Register Connector, Update Connector Registration. The Client Token parameter allows idempotent operations for these APIs. 
+* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.13.0](service/drs/CHANGELOG.md#v1130-2023-04-17) + * **Feature**: Changed existing APIs and added new APIs to support using an account-level launch configuration template with AWS Elastic Disaster Recovery. +* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.19.5](service/dynamodb/CHANGELOG.md#v1195-2023-04-17) + * **Documentation**: Documentation updates for DynamoDB API +* `github.com/aws/aws-sdk-go-v2/service/emrserverless`: [v1.7.0](service/emrserverless/CHANGELOG.md#v170-2023-04-17) + * **Feature**: The GetJobRun API has been updated to include the job's billed resource utilization. This utilization shows the aggregate vCPU, memory and storage that AWS has billed for the job run. The billed resources include a 1-minute minimum usage for workers, plus additional storage over 20 GB per worker. +* `github.com/aws/aws-sdk-go-v2/service/internetmonitor`: [v1.2.0](service/internetmonitor/CHANGELOG.md#v120-2023-04-17) + * **Feature**: This release includes a new configurable value, TrafficPercentageToMonitor, which allows users to adjust the amount of traffic monitored by percentage +* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.27.0](service/iotwireless/CHANGELOG.md#v1270-2023-04-17) + * **Feature**: Supports the new feature of LoRaWAN roaming, allows to configure MaxEirp for LoRaWAN gateway, and allows to configure PingSlotPeriod for LoRaWAN multicast group +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.33.0](service/lambda/CHANGELOG.md#v1330-2023-04-17) + * **Feature**: Add Python 3.10 (python3.10) support to AWS Lambda + +# Release (2023-04-14) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.25.1](service/ecs/CHANGELOG.md#v1251-2023-04-14) + * **Documentation**: This release supports ephemeral storage for AWS Fargate Windows containers. +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.32.0](service/lambda/CHANGELOG.md#v1320-2023-04-14) + * **Feature**: This release adds SnapStart related exceptions to InvokeWithResponseStream API. IAM access related documentation is also added for this API. +* `github.com/aws/aws-sdk-go-v2/service/migrationhubrefactorspaces`: [v1.9.8](service/migrationhubrefactorspaces/CHANGELOG.md#v198-2023-04-14) + * **Documentation**: Doc only update for Refactor Spaces environments without network bridge feature. +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.43.0](service/rds/CHANGELOG.md#v1430-2023-04-14) + * **Feature**: This release adds support of modifying the engine mode of database clusters. + +# Release (2023-04-13) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/chimesdkvoice`: [v1.5.0](service/chimesdkvoice/CHANGELOG.md#v150-2023-04-13) + * **Feature**: This release adds tagging support for Voice Connectors and SIP Media Applications +* `github.com/aws/aws-sdk-go-v2/service/mediaconnect`: [v1.19.0](service/mediaconnect/CHANGELOG.md#v1190-2023-04-13) + * **Feature**: Gateway is a new feature of AWS Elemental MediaConnect. Gateway allows the deployment of on-premises resources for the purpose of transporting live video to and from the AWS Cloud. 
+ +# Release (2023-04-12) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.18.0](service/groundstation/CHANGELOG.md#v1180-2023-04-12) + * **Feature**: AWS Ground Station Wideband DigIF GA Release +* `github.com/aws/aws-sdk-go-v2/service/managedblockchain`: [v1.15.5](service/managedblockchain/CHANGELOG.md#v1155-2023-04-12) + * **Documentation**: Removal of the Ropsten network. The Ethereum foundation ceased support of Ropsten on December 31st, 2022. + +# Release (2023-04-11) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/ecrpublic`: [v1.16.0](service/ecrpublic/CHANGELOG.md#v1160-2023-04-11) + * **Feature**: This release will allow using registry alias as registryId in BatchDeleteImage request. +* `github.com/aws/aws-sdk-go-v2/service/emrserverless`: [v1.6.0](service/emrserverless/CHANGELOG.md#v160-2023-04-11) + * **Feature**: This release extends GetJobRun API to return job run timeout (executionTimeoutMinutes) specified during StartJobRun call (or default timeout of 720 minutes if none was specified). +* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.19.0](service/eventbridge/CHANGELOG.md#v1190-2023-04-11) + * **Feature**: EventBridge PutTarget support for multiple SQL arguments on RedshiftDataParameters +* `github.com/aws/aws-sdk-go-v2/service/iotdataplane`: [v1.15.0](service/iotdataplane/CHANGELOG.md#v1150-2023-04-11) + * **Feature**: This release adds support for MQTT5 user properties when calling the AWS IoT GetRetainedMessage API +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.29.0](service/wafv2/CHANGELOG.md#v1290-2023-04-11) + * **Feature**: For web ACLs that protect CloudFront distributions, the default request body inspection size is now 16 KB, and you can use the new association configuration to increase the inspection size further, up to 64 KB. Sizes over 16 KB can incur additional costs. + +# Release (2023-04-10) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.51.0](service/connect/CHANGELOG.md#v1510-2023-04-10) + * **Feature**: This release adds the ability to configure an agent's routing profile to receive contacts from multiple channels at the same time via extending the UpdateRoutingProfileConcurrency, CreateRoutingProfile and DescribeRoutingProfile APIs. +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.25.0](service/ecs/CHANGELOG.md#v1250-2023-04-10) + * **Feature**: This release adds support for enabling FIPS compliance on Amazon ECS Fargate tasks +* `github.com/aws/aws-sdk-go-v2/service/marketplacecatalog`: [v1.16.0](service/marketplacecatalog/CHANGELOG.md#v1160-2023-04-10) + * **Feature**: Added three new APIs to support resource sharing: GetResourcePolicy, PutResourcePolicy, and DeleteResourcePolicy. Added new OwnershipType field to ListEntities request to let users filter on entities that are shared with them. Increased max page size of ListEntities response from 20 to 50 results. +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.35.0](service/mediaconvert/CHANGELOG.md#v1350-2023-04-10) + * **Feature**: AWS Elemental MediaConvert SDK now supports conversion of 608 paint-on captions to pop-on captions for SCC sources. +* `github.com/aws/aws-sdk-go-v2/service/omics`: [v1.3.0](service/omics/CHANGELOG.md#v130-2023-04-10) + * **Feature**: Remove unexpected API changes. 
+* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.24.0](service/rekognition/CHANGELOG.md#v1240-2023-04-10) + * **Feature**: This release adds support for Face Liveness APIs in Amazon Rekognition. Updates UpdateStreamProcessor to return ResourceInUseException Exception. Minor updates to API documentation. + +# Release (2023-04-07) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/dlm`: [v1.15.0](service/dlm/CHANGELOG.md#v1150-2023-04-07) + * **Announcement**: This release includes breaking changes for the timestamp trait on the data lifecycle management client. + * **Feature**: Updated timestamp format for GetLifecyclePolicy API + * **Bug Fix**: Correct timestamp type for data lifecycle manager. +* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.21.0](service/docdb/CHANGELOG.md#v1210-2023-04-07) + * **Feature**: This release adds a new parameter 'DBClusterParameterGroupName' to 'RestoreDBClusterFromSnapshot' API to associate the name of the DB cluster parameter group while performing restore. +* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.28.8](service/fsx/CHANGELOG.md#v1288-2023-04-07) + * **Documentation**: Amazon FSx for Lustre now supports creating data repository associations on Persistent_1 and Scratch_2 file systems. +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.31.0](service/lambda/CHANGELOG.md#v1310-2023-04-07) + * **Feature**: This release adds a new Lambda InvokeWithResponseStream API to support streaming Lambda function responses. The release also adds a new InvokeMode parameter to Function Url APIs to control whether the response will be streamed or buffered. +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.34.0](service/quicksight/CHANGELOG.md#v1340-2023-04-07) + * **Feature**: This release has two changes: adding the OR condition to tag-based RLS rules in CreateDataSet and UpdateDataSet; adding RefreshSchedule and Incremental RefreshProperties operations for users to programmatically configure SPICE dataset ingestions. +* `github.com/aws/aws-sdk-go-v2/service/redshiftdata`: [v1.19.3](service/redshiftdata/CHANGELOG.md#v1193-2023-04-07) + * **Documentation**: Update documentation of API descriptions as needed in support of temporary credentials with IAM identity. +* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.18.1](service/servicecatalog/CHANGELOG.md#v1181-2023-04-07) + * **Documentation**: Updates description for property + +# Release (2023-04-06) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.27.0](service/cloudformation/CHANGELOG.md#v1270-2023-04-06) + * **Feature**: Including UPDATE_COMPLETE as a failed status for DeleteStack waiter. +* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.22.0](service/greengrassv2/CHANGELOG.md#v1220-2023-04-06) + * **Feature**: Add support for SUCCEEDED value in coreDeviceExecutionStatus field. Documentation updates for Greengrass V2. +* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.21.0](service/proton/CHANGELOG.md#v1210-2023-04-06) + * **Feature**: This release adds support for the AWS Proton service sync feature. Service sync enables managing an AWS Proton service (creating and updating instances) and all of its corresponding service instances from a Git repository. 
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.42.1](service/rds/CHANGELOG.md#v1421-2023-04-06) + * **Documentation**: Adds and updates the SDK examples + +# Release (2023-04-05) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.31.0](service/configservice/CHANGELOG.md#v1310-2023-04-05) + * **Feature**: This release adds resourceType enums for types released in March 2023. +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.24.3](service/ecs/CHANGELOG.md#v1243-2023-04-05) + * **Documentation**: This is a document-only update to add information about Amazon Elastic Inference (EI). +* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.16.7](service/identitystore/CHANGELOG.md#v1167-2023-04-05) + * **Documentation**: Documentation updates for Identity Store CLI command reference. +* `github.com/aws/aws-sdk-go-v2/service/ivsrealtime`: [v1.1.0](service/ivsrealtime/CHANGELOG.md#v110-2023-04-05) + * **Feature**: Fix ParticipantToken ExpirationTime format +* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.26.0](service/networkfirewall/CHANGELOG.md#v1260-2023-04-05) + * **Feature**: AWS Network Firewall now supports IPv6-only subnets. +* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.18.0](service/servicecatalog/CHANGELOG.md#v1180-2023-04-05) + * **Feature**: removed incorrect product type value +* `github.com/aws/aws-sdk-go-v2/service/vpclattice`: [v1.0.1](service/vpclattice/CHANGELOG.md#v101-2023-04-05) + * **Documentation**: This release removes the entities in the API doc model package for auth policies. + +# Release (2023-04-04) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/amplifyuibuilder`: [v1.10.0](service/amplifyuibuilder/CHANGELOG.md#v1100-2023-04-04) + * **Feature**: Support StorageField and custom displays for data-bound options in form builder. Support non-string operands for predicates in collections. Support choosing client to get token from. +* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.28.1](service/autoscaling/CHANGELOG.md#v1281-2023-04-04) + * **Documentation**: Documentation updates for Amazon EC2 Auto Scaling +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.93.0](service/ec2/CHANGELOG.md#v1930-2023-04-04) + * **Feature**: C6in, M6in, M6idn, R6in and R6idn bare metal instances are powered by 3rd Generation Intel Xeon Scalable processors and offer up to 200 Gbps of network bandwidth. +* `github.com/aws/aws-sdk-go-v2/service/elasticinference`: [v1.13.0](service/elasticinference/CHANGELOG.md#v1130-2023-04-04) + * **Feature**: Updated public documentation for the Describe and Tagging APIs. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.73.0](service/sagemaker/CHANGELOG.md#v1730-2023-04-04) + * **Feature**: Amazon SageMaker Asynchronous Inference now allows customers to receive failure model responses in S3 and receive success/failure model responses in SNS notifications. +* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.19.0](service/sagemakerruntime/CHANGELOG.md#v1190-2023-04-04) + * **Feature**: Amazon SageMaker Asynchronous Inference now provides customers a FailureLocation as a response parameter in InvokeEndpointAsync API to capture the model failure responses. +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.28.0](service/wafv2/CHANGELOG.md#v1280-2023-04-04) + * **Feature**: This release rolls back association config feature for web ACLs that protect CloudFront distributions. 
+ +# Release (2023-04-03) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.45.0](service/glue/CHANGELOG.md#v1450-2023-04-03) + * **Feature**: Add support for database-level federation +* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.21.0](service/lakeformation/CHANGELOG.md#v1210-2023-04-03) + * **Feature**: Add support for database-level federation +* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.18.0](service/licensemanager/CHANGELOG.md#v1180-2023-04-03) + * **Feature**: This release adds grant override options to the CreateGrantVersion API. These options can be used to specify grant replacement behavior during grant activation. +* `github.com/aws/aws-sdk-go-v2/service/mwaa`: [v1.15.0](service/mwaa/CHANGELOG.md#v1150-2023-04-03) + * **Feature**: This Amazon MWAA release adds the ability to customize the Apache Airflow environment by launching a shell script at startup. This shell script is hosted in your environment's Amazon S3 bucket. Amazon MWAA runs the script before installing requirements and initializing the Apache Airflow process. +* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.17.0](service/servicecatalog/CHANGELOG.md#v1170-2023-04-03) + * **Feature**: This release introduces Service Catalog support for Terraform open source. It enables 1. The notify* APIs to Service Catalog. These APIs are used by the terraform engine to notify the result of the provisioning engine execution. 2. Adds a new TERRAFORM_OPEN_SOURCE product type in CreateProduct API. +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.27.0](service/wafv2/CHANGELOG.md#v1270-2023-04-03) + * **Feature**: For web ACLs that protect CloudFront distributions, the default request body inspection size is now 16 KB, and you can use the new association configuration to increase the inspection size further, up to 64 KB. Sizes over 16 KB can incur additional costs. + +# Release (2023-03-31) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.92.1](service/ec2/CHANGELOG.md#v1921-2023-03-31) + * **Documentation**: Documentation updates for EC2 On Demand Capacity Reservations +* `github.com/aws/aws-sdk-go-v2/service/internetmonitor`: [v1.1.0](service/internetmonitor/CHANGELOG.md#v110-2023-03-31) + * **Feature**: This release adds a new feature for Amazon CloudWatch Internet Monitor that enables customers to deliver internet measurements to Amazon S3 buckets as well as CloudWatch Logs. +* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.10.1](service/resiliencehub/CHANGELOG.md#v1101-2023-03-31) + * **Documentation**: Adding EKS related documentation for appTemplateBody +* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.31.1](service/s3/CHANGELOG.md#v1311-2023-03-31) + * **Documentation**: Documentation updates for Amazon S3 +* `github.com/aws/aws-sdk-go-v2/service/sagemakerfeaturestoreruntime`: [v1.14.0](service/sagemakerfeaturestoreruntime/CHANGELOG.md#v1140-2023-03-31) + * **Feature**: In this release, you can now choose between soft delete and hard delete when calling the DeleteRecord API, so you have more flexibility when it comes to managing online store data. 
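+
+For the `sagemakerfeaturestoreruntime` v1.14.0 entry directly above, here is a minimal, hedged Go sketch of choosing soft delete on DeleteRecord. The feature group name, record identifier, and the assumption that the new option is exposed as a `DeletionMode` parameter with `DeletionModeSoftDelete`/`DeletionModeHardDelete` constants are illustrative, not confirmed against the generated client.
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/sagemakerfeaturestoreruntime"
+	"github.com/aws/aws-sdk-go-v2/service/sagemakerfeaturestoreruntime/types"
+)
+
+func main() {
+	cfg, err := config.LoadDefaultConfig(context.TODO())
+	if err != nil {
+		log.Fatal(err)
+	}
+	client := sagemakerfeaturestoreruntime.NewFromConfig(cfg)
+
+	// Soft-delete a record from the online store instead of hard-deleting it.
+	_, err = client.DeleteRecord(context.TODO(), &sagemakerfeaturestoreruntime.DeleteRecordInput{
+		FeatureGroupName:              aws.String("customers"),   // hypothetical feature group
+		RecordIdentifierValueAsString: aws.String("customer-42"), // hypothetical record id
+		EventTime:                     aws.String("2023-03-31T00:00:00Z"),
+		DeletionMode:                  types.DeletionModeSoftDelete, // assumed enum name; hard delete is the other value
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+```
+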
+ +# Release (2023-03-30) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.25.0](service/athena/CHANGELOG.md#v1250-2023-03-30) + * **Feature**: Make DefaultExecutorDpuSize and CoordinatorDpuSize fields optional in StartSession +* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.28.0](service/autoscaling/CHANGELOG.md#v1280-2023-03-30) + * **Feature**: Amazon EC2 Auto Scaling now supports Elastic Load Balancing traffic sources with the AttachTrafficSources, DetachTrafficSources, and DescribeTrafficSources APIs. This release also introduces a new activity status, "WaitingForConnectionDraining", for VPC Lattice to the DescribeScalingActivities API. +* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.23.0](service/batch/CHANGELOG.md#v1230-2023-03-30) + * **Feature**: This feature allows Batch on EKS to support configuration of Pod Labels through Metadata for Batch on EKS Jobs. +* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.22.0](service/computeoptimizer/CHANGELOG.md#v1220-2023-03-30) + * **Feature**: This release adds support for HDD EBS volume types and io2 Block Express. We are also adding support for 61 new instance types and instances that have non consecutive runtime. +* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.12.0](service/drs/CHANGELOG.md#v1120-2023-03-30) + * **Feature**: Adding a field to the replication configuration APIs to support the auto replicate new disks feature. We also deprecated RetryDataReplication. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.92.0](service/ec2/CHANGELOG.md#v1920-2023-03-30) + * **Feature**: This release adds support for Tunnel Endpoint Lifecycle control, a new feature that provides Site-to-Site VPN customers with better visibility and control of their VPN tunnel maintenance updates. +* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.24.0](service/emr/CHANGELOG.md#v1240-2023-03-30) + * **Feature**: Updated DescribeCluster and ListClusters API responses to include ErrorDetail that specifies error code, programmatically accessible error data,and an error message. ErrorDetail provides the underlying reason for cluster failure and recommends actions to simplify troubleshooting of EMR clusters. +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.44.0](service/glue/CHANGELOG.md#v1440-2023-03-30) + * **Feature**: This release adds support for AWS Glue Data Quality, which helps you evaluate and monitor the quality of your data and includes the API for creating, deleting, or updating data quality rulesets, runs and evaluations. +* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.20.0](service/guardduty/CHANGELOG.md#v1200-2023-03-30) + * **Feature**: Added EKS Runtime Monitoring feature support to existing detector, finding APIs and introducing new Coverage APIs +* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.23.0](service/imagebuilder/CHANGELOG.md#v1230-2023-03-30) + * **Feature**: Adds support for new image workflow details and image vulnerability detection. +* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.21.0](service/ivs/CHANGELOG.md#v1210-2023-03-30) + * **Feature**: Amazon Interactive Video Service (IVS) now offers customers the ability to configure IVS channels to allow insecure RTMP ingest. +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.39.0](service/kendra/CHANGELOG.md#v1390-2023-03-30) + * **Feature**: AWS Kendra now supports featured results for a query. 
+* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.25.0](service/networkfirewall/CHANGELOG.md#v1250-2023-03-30) + * **Feature**: AWS Network Firewall added TLS inspection configurations to allow TLS traffic inspection. +* `github.com/aws/aws-sdk-go-v2/service/sagemakergeospatial`: [v1.2.0](service/sagemakergeospatial/CHANGELOG.md#v120-2023-03-30) + * **Feature**: Amazon SageMaker geospatial capabilities now support server-side encryption with customer managed KMS key and SageMaker notebooks with a SageMaker geospatial image in an Amazon SageMaker Domain with VPC-only mode. +* `github.com/aws/aws-sdk-go-v2/service/vpclattice`: [v1.0.0](service/vpclattice/CHANGELOG.md#v100-2023-03-30) + * **Release**: New AWS service client module + * **Feature**: General Availability (GA) release of Amazon VPC Lattice +* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.19.0](service/wellarchitected/CHANGELOG.md#v1190-2023-03-30) + * **Feature**: AWS Well-Architected SDK now supports getting consolidated report metrics and generating a consolidated report PDF. + +# Release (2023-03-29) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/opensearchserverless`: [v1.2.0](service/opensearchserverless/CHANGELOG.md#v120-2023-03-29) + * **Feature**: This release includes two new exception types "ServiceQuotaExceededException" and "OcuLimitExceededException". +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.42.0](service/rds/CHANGELOG.md#v1420-2023-03-29) + * **Feature**: Add support for creating a read replica DB instance from a Multi-AZ DB cluster. + +# Release (2023-03-28) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/ssmcontacts`: [v1.15.0](service/ssmcontacts/CHANGELOG.md#v1150-2023-03-28) + * **Feature**: This release adds 12 new APIs as part of Oncall Schedule feature release, adds support for a new contact type: ONCALL_SCHEDULE. Check public documentation for AWS ssm-contacts for more information +* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.21.0](service/ssmincidents/CHANGELOG.md#v1210-2023-03-28) + * **Feature**: Increased maximum length of "TriggerDetails.rawData" to 10K characters and "IncidentSummary" to 8K characters. + +# Release (2023-03-27) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.24.0](service/athena/CHANGELOG.md#v1240-2023-03-27) + * **Feature**: Enforces a minimal level of encryption for the workgroup for query and calculation results that are written to Amazon S3. When enabled, workgroup users can set encryption only to the minimum level set by the administrator or higher when they submit queries. +* `github.com/aws/aws-sdk-go-v2/service/chimesdkvoice`: [v1.4.0](service/chimesdkvoice/CHANGELOG.md#v140-2023-03-27) + * **Feature**: Documentation updates for Amazon Chime SDK Voice. +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.50.0](service/connect/CHANGELOG.md#v1500-2023-03-27) + * **Feature**: This release introduces support for RelatedContactId in the StartChatContact API. Interactive message and interactive message response have been added to the list of supported message content types for this API as well. +* `github.com/aws/aws-sdk-go-v2/service/connectparticipant`: [v1.15.7](service/connectparticipant/CHANGELOG.md#v1157-2023-03-27) + * **Documentation**: This release provides an update to the SendMessage API to handle interactive message response content-types. 
+* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.26.0](service/iotwireless/CHANGELOG.md#v1260-2023-03-27) + * **Feature**: Introducing new APIs that enable Sidewalk devices to communicate with AWS IoT Core through Sidewalk gateways. This will empower AWS customers to connect Sidewalk devices with other AWS IoT Services, creating possibilities for seamless integration and advanced device management. +* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.31.0](service/medialive/CHANGELOG.md#v1310-2023-03-27) + * **Feature**: AWS Elemental MediaLive now supports ID3 tag insertion for audio only HLS output groups. AWS Elemental Link devices now support tagging. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.72.1](service/sagemaker/CHANGELOG.md#v1721-2023-03-27) + * **Documentation**: Fixed some improperly rendered links in SDK documentation. +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.30.0](service/securityhub/CHANGELOG.md#v1300-2023-03-27) + * **Feature**: Added new resource detail objects to ASFF, including resources for AwsEksCluster, AWSS3Bucket, AwsEc2RouteTable and AwsEC2Instance. +* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.17.0](service/servicecatalogappregistry/CHANGELOG.md#v1170-2023-03-27) + * **Feature**: In this release, we started supporting ARN in applicationSpecifier and attributeGroupSpecifier. GetAttributeGroup, ListAttributeGroups and ListAttributeGroupsForApplication APIs will now have CreatedBy field in the response. +* `github.com/aws/aws-sdk-go-v2/service/voiceid`: [v1.13.0](service/voiceid/CHANGELOG.md#v1130-2023-03-27) + * **Feature**: Amazon Connect Voice ID now supports multiple fraudster watchlists. Every domain has a default watchlist where all existing fraudsters are placed by default. Custom watchlists may now be created, managed, and evaluated against for known fraudster detection. + +# Release (2023-03-24) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.25.7](service/cloudwatch/CHANGELOG.md#v1257-2023-03-24) + * **Documentation**: Doc-only update to correct alarm actions list +* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.23.0](service/comprehend/CHANGELOG.md#v1230-2023-03-24) + * **Feature**: This release adds a new field (FlywheelArn) to the EntitiesDetectionJobProperties object. The FlywheelArn field is returned in the DescribeEntitiesDetectionJob and ListEntitiesDetectionJobs responses when the EntitiesDetection job is started with a FlywheelArn instead of an EntityRecognizerArn . +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.41.0](service/rds/CHANGELOG.md#v1410-2023-03-24) + * **Feature**: Added error code CreateCustomDBEngineVersionFault for when the create custom engine version for Custom engines fails. + +# Release (2023-03-23) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.22.0](service/batch/CHANGELOG.md#v1220-2023-03-23) + * **Feature**: This feature allows Batch to support configuration of ephemeral storage size for jobs running on FARGATE +* `github.com/aws/aws-sdk-go-v2/service/chimesdkidentity`: [v1.11.0](service/chimesdkidentity/CHANGELOG.md#v1110-2023-03-23) + * **Feature**: AppInstanceBots can be used to add a bot powered by Amazon Lex to chat channels. ExpirationSettings provides automatic resource deletion for AppInstanceUsers. 
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines`: [v1.3.0](service/chimesdkmediapipelines/CHANGELOG.md#v130-2023-03-23) + * **Feature**: This release adds Amazon Chime SDK call analytics. Call analytics include voice analytics, which provides speaker search and voice tone analysis. These capabilities can be used with Amazon Transcribe and Transcribe Call Analytics to generate machine-learning-powered insights from real-time audio. +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.14.0](service/chimesdkmessaging/CHANGELOG.md#v1140-2023-03-23) + * **Feature**: ExpirationSettings provides automatic resource deletion for Channels. +* `github.com/aws/aws-sdk-go-v2/service/chimesdkvoice`: [v1.3.0](service/chimesdkvoice/CHANGELOG.md#v130-2023-03-23) + * **Feature**: This release adds Amazon Chime SDK call analytics. Call analytics include voice analytics, which provides speaker search and voice tone analysis. These capabilities can be used with Amazon Transcribe and Transcribe Call Analytics to generate machine-learning-powered insights from real-time audio. +* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.18.0](service/codeartifact/CHANGELOG.md#v1180-2023-03-23) + * **Feature**: Repository CreationTime is added to the CreateRepository and ListRepositories API responses. +* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.19.0](service/guardduty/CHANGELOG.md#v1190-2023-03-23) + * **Feature**: Adds AutoEnableOrganizationMembers attribute to DescribeOrganizationConfiguration and UpdateOrganizationConfiguration APIs. +* `github.com/aws/aws-sdk-go-v2/service/ivsrealtime`: [v1.0.0](service/ivsrealtime/CHANGELOG.md#v100-2023-03-23) + * **Release**: New AWS service client module + * **Feature**: Initial release of the Amazon Interactive Video Service RealTime API. +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.34.0](service/mediaconvert/CHANGELOG.md#v1340-2023-03-23) + * **Feature**: AWS Elemental MediaConvert SDK now supports passthrough of ID3v2 tags for audio inputs to audio-only HLS outputs. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.72.0](service/sagemaker/CHANGELOG.md#v1720-2023-03-23) + * **Feature**: Amazon SageMaker Autopilot adds two new APIs - CreateAutoMLJobV2 and DescribeAutoMLJobV2. Amazon SageMaker Notebook Instances now supports the ml.geospatial.interactive instance type. +* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.21.0](service/servicediscovery/CHANGELOG.md#v1210-2023-03-23) + * **Feature**: Reverted the throttling exception RequestLimitExceeded for AWS Cloud Map APIs introduced in SDK version 1.12.424 2023-03-09 to previous exception specified in the ErrorCode. +* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.21.0](service/textract/CHANGELOG.md#v1210-2023-03-23) + * **Feature**: The AnalyzeDocument - Tables feature adds support for new elements in the API: table titles, footers, section titles, summary cells/tables, and table type. + +# Release (2023-03-22) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.19.8](service/iam/CHANGELOG.md#v1198-2023-03-22) + * **Documentation**: Documentation updates for AWS Identity and Access Management (IAM). +* `github.com/aws/aws-sdk-go-v2/service/iottwinmaker`: [v1.11.0](service/iottwinmaker/CHANGELOG.md#v1110-2023-03-22) + * **Feature**: This release adds support of adding metadata when creating a new scene or updating an existing scene. 
+* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.17.8](service/networkmanager/CHANGELOG.md#v1178-2023-03-22) + * **Documentation**: This release includes an update to create-transit-gateway-route-table-attachment, showing example usage for TransitGatewayRouteTableArn. +* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.10.0](service/resiliencehub/CHANGELOG.md#v1100-2023-03-22) + * **Feature**: This release provides customers with the ability to import resources from within an EKS cluster and assess the resiliency of EKS cluster workloads. +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.36.0](service/ssm/CHANGELOG.md#v1360-2023-03-22) + * **Feature**: This Patch Manager release supports creating, updating, and deleting Patch Baselines for AmazonLinux2023, AlmaLinux. + +# Release (2023-03-21) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.13.0](service/chimesdkmessaging/CHANGELOG.md#v1130-2023-03-21) + * **Feature**: Amazon Chime SDK messaging customers can now manage streaming configuration for messaging data for archival and analysis. +* `github.com/aws/aws-sdk-go-v2/service/cleanrooms`: [v1.1.0](service/cleanrooms/CHANGELOG.md#v110-2023-03-21) + * **Feature**: GA Release of AWS Clean Rooms, Added Tagging Functionality +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.91.0](service/ec2/CHANGELOG.md#v1910-2023-03-21) + * **Feature**: This release adds support for AWS Network Firewall, AWS PrivateLink, and Gateway Load Balancers to Amazon VPC Reachability Analyzer, and it makes the path destination optional as long as a destination address in the filter at source is provided. +* `github.com/aws/aws-sdk-go-v2/service/internal/s3shared`: [v1.14.0](service/internal/s3shared/CHANGELOG.md#v1140-2023-03-21) + * **Feature**: port v1 sdk 100-continue http header customization for s3 PutObject/UploadPart request and enable user config +* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.28.0](service/iotsitewise/CHANGELOG.md#v1280-2023-03-21) + * **Feature**: Provide support for tagging of data streams and enabling tag based authorization for property alias +* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.18.0](service/mgn/CHANGELOG.md#v1180-2023-03-21) + * **Feature**: This release introduces the Import and export feature and expansion of the post-launch actions +* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.31.0](service/s3/CHANGELOG.md#v1310-2023-03-21) + * **Feature**: port v1 sdk 100-continue http header customization for s3 PutObject/UploadPart request and enable user config + +# Release (2023-03-20) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.19.0](service/applicationautoscaling/CHANGELOG.md#v1190-2023-03-20) + * **Feature**: With this release customers can now tag their Application Auto Scaling registered targets with key-value pairs and manage IAM permissions for all the tagged resources centrally. +* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.20.0](service/neptune/CHANGELOG.md#v1200-2023-03-20) + * **Feature**: This release makes following few changes. db-cluster-identifier is now a required parameter of create-db-instance. describe-db-cluster will now return PendingModifiedValues and GlobalClusterIdentifier fields in the response. 
+* `github.com/aws/aws-sdk-go-v2/service/s3outposts`: [v1.16.0](service/s3outposts/CHANGELOG.md#v1160-2023-03-20) + * **Feature**: S3 On Outposts added support for endpoint status, and a failed endpoint reason, if any +* `github.com/aws/aws-sdk-go-v2/service/workdocs`: [v1.14.0](service/workdocs/CHANGELOG.md#v1140-2023-03-20) + * **Feature**: This release adds a new API, SearchResources, which enables users to search through metadata and content of folders, documents, document versions and comments in a WorkDocs site. + +# Release (2023-03-17) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/billingconductor`: [v1.6.0](service/billingconductor/CHANGELOG.md#v160-2023-03-17) + * **Feature**: This release adds a new filter to ListAccountAssociations API and a new filter to ListBillingGroups API. +* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.30.0](service/configservice/CHANGELOG.md#v1300-2023-03-17) + * **Feature**: This release adds resourceType enums for types released from October 2022 through February 2023. +* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.25.0](service/databasemigrationservice/CHANGELOG.md#v1250-2023-03-17) + * **Feature**: S3 setting to create AWS Glue Data Catalog. Oracle setting to control conversion of timestamp column. Support for Kafka SASL Plain authentication. Setting to map boolean from PostgreSQL to Redshift. SQL Server settings to force lob lookup on inline LOBs and to control access of database logs. + +# Release (2023-03-16) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/config`: [v1.18.18](config/CHANGELOG.md#v11818-2023-03-16) + * **Bug Fix**: Allow RoleARN to be set as functional option on STS WebIdentityRoleOptions. Fixes aws/aws-sdk-go-v2#2015. (See the Go sketch after the 2023-03-15 release below.) +* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.18.0](service/guardduty/CHANGELOG.md#v1180-2023-03-16) + * **Feature**: Updated 9 APIs for feature enablement to reflect expansion of GuardDuty to features. Added new APIs and updated existing APIs to support RDS Protection GA. +* `github.com/aws/aws-sdk-go-v2/service/resourceexplorer2`: [v1.2.7](service/resourceexplorer2/CHANGELOG.md#v127-2023-03-16) + * **Documentation**: Documentation updates for APIs. +* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.18.7](service/sagemakerruntime/CHANGELOG.md#v1187-2023-03-16) + * **Documentation**: Documentation updates for SageMaker Runtime + +# Release (2023-03-15) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/migrationhubstrategy`: [v1.9.0](service/migrationhubstrategy/CHANGELOG.md#v190-2023-03-15) + * **Feature**: This release adds the binary analysis that analyzes IIS application DLLs on Windows and Java applications on Linux to provide anti-pattern report without configuring access to the source code. +* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.31.0](service/s3control/CHANGELOG.md#v1310-2023-03-15) + * **Feature**: Added support for S3 Object Lambda aliases. +* `github.com/aws/aws-sdk-go-v2/service/securitylake`: [v1.3.0](service/securitylake/CHANGELOG.md#v130-2023-03-15) + * **Feature**: Make Create/Get/ListSubscribers APIs return resource share ARN and name so they can be used to validate the RAM resource share to accept. GetDatalake can be used to track status of UpdateDatalake and DeleteDatalake requests. 
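+
+A minimal, hedged Go sketch for the `config` v1.18.18 bug fix in the 2023-03-16 release above: supplying RoleARN through the functional option on the STS web identity provider. The role ARN is illustrative.
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
+)
+
+func main() {
+	// With v1.18.18, RoleARN supplied via this option is honored (see aws/aws-sdk-go-v2#2015).
+	cfg, err := config.LoadDefaultConfig(context.TODO(),
+		config.WithWebIdentityRoleCredentialOptions(func(o *stscreds.WebIdentityRoleOptions) {
+			o.RoleARN = "arn:aws:iam::123456789012:role/my-workload-role" // hypothetical ARN
+		}),
+	)
+	if err != nil {
+		log.Fatal(err)
+	}
+	_ = cfg // use cfg with any service client
+}
+```
+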
+ +# Release (2023-03-14) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/feature/ec2/imds`: [v1.13.0](feature/ec2/imds/CHANGELOG.md#v1130-2023-03-14) + * **Feature**: Add flag to disable IMDSv1 fallback +* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.18.0](service/applicationautoscaling/CHANGELOG.md#v1180-2023-03-14) + * **Feature**: Application Auto Scaling customers can now use mathematical functions to customize the metric used with Target Tracking policies within the policy configuration itself, saving the cost and effort of publishing the customizations as a separate metric. +* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.19.0](service/dataexchange/CHANGELOG.md#v1190-2023-03-14) + * **Feature**: This release enables data providers to license direct access to S3 objects encrypted with Customer Managed Keys (CMK) in AWS KMS through AWS Data Exchange. Subscribers can use these keys to decrypt, then use the encrypted S3 objects shared with them, without creating or managing copies. +* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.18.7](service/directconnect/CHANGELOG.md#v1187-2023-03-14) + * **Documentation**: describe-direct-connect-gateway-associations includes a new status, updating, indicating that the association is currently in-process of updating. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.90.0](service/ec2/CHANGELOG.md#v1900-2023-03-14) + * **Feature**: This release adds a new DnsOptions key (PrivateDnsOnlyForInboundResolverEndpoint) to CreateVpcEndpoint and ModifyVpcEndpoint APIs. +* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.19.6](service/iam/CHANGELOG.md#v1196-2023-03-14) + * **Documentation**: Documentation only updates to correct customer-reported issues +* `github.com/aws/aws-sdk-go-v2/service/keyspaces`: [v1.2.0](service/keyspaces/CHANGELOG.md#v120-2023-03-14) + * **Feature**: Adding support for client-side timestamps +* `github.com/aws/aws-sdk-go-v2/service/support`: [v1.14.6](service/support/CHANGELOG.md#v1146-2023-03-14) + * **Announcement**: Model regenerated with support for null string values to properly implement `support` service operations `DescribeTrustedAdvisorCheckRefreshStatuses` and `DescribeTrustedAdvisorCheckSummaries` + +# Release (2023-03-13) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/appintegrations`: [v1.15.0](service/appintegrations/CHANGELOG.md#v1150-2023-03-13) + * **Feature**: Adds FileConfiguration to Amazon AppIntegrations CreateDataIntegration supporting scheduled downloading of third party files into Amazon Connect from sources such as Microsoft SharePoint. +* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.20.2](service/lakeformation/CHANGELOG.md#v1202-2023-03-13) + * **Documentation**: This release updates the documentation regarding Get/Update DataCellsFilter +* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.30.0](service/s3control/CHANGELOG.md#v1300-2023-03-13) + * **Feature**: Added support for cross-account Multi-Region Access Points. Added support for S3 Replication for S3 on Outposts. +* `github.com/aws/aws-sdk-go-v2/service/tnb`: [v1.1.0](service/tnb/CHANGELOG.md#v110-2023-03-13) + * **Feature**: This release adds tagging support to the following Network Instance APIs : Instantiate, Update, Terminate. 
+* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.13.0](service/wisdom/CHANGELOG.md#v1130-2023-03-13) + * **Feature**: This release extends Wisdom CreateKnowledgeBase API to support SharePoint connector type by removing the @required trait for objectField + +# Release (2023-03-10) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.4.0](service/ivschat/CHANGELOG.md#v140-2023-03-10) + * **Feature**: This release adds a new exception returned when calling AWS IVS chat UpdateLoggingConfiguration. Now UpdateLoggingConfiguration can return ConflictException when invalid updates are made in sequence to Logging Configurations. +* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.19.0](service/secretsmanager/CHANGELOG.md#v1190-2023-03-10) + * **Feature**: The type definitions of SecretString and SecretBinary now have a minimum length of 1 in the model to match the exception thrown when you pass in empty values. + +# Release (2023-03-09) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.17.0](service/codeartifact/CHANGELOG.md#v1170-2023-03-09) + * **Feature**: This release introduces the generic package format, a mechanism for storing arbitrary binary assets. It also adds a new API, PublishPackageVersion, to allow for publishing generic packages. +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.49.0](service/connect/CHANGELOG.md#v1490-2023-03-09) + * **Feature**: This release adds a new API, GetMetricDataV2, which returns metric data for Amazon Connect. +* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.11.0](service/evidently/CHANGELOG.md#v1110-2023-03-09) + * **Feature**: Updated entity override documentation +* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.17.5](service/networkmanager/CHANGELOG.md#v1175-2023-03-09) + * **Documentation**: This update provides example usage for TransitGatewayRouteTableArn. +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.33.0](service/quicksight/CHANGELOG.md#v1330-2023-03-09) + * **Feature**: This release has two changes: add state persistence feature for embedded dashboard and console in GenerateEmbedUrlForRegisteredUser API; add properties for hidden collapsed row dimensions in PivotTableOptions. +* `github.com/aws/aws-sdk-go-v2/service/redshiftdata`: [v1.19.0](service/redshiftdata/CHANGELOG.md#v1190-2023-03-09) + * **Feature**: Added support for Redshift Serverless workgroup-arn wherever the WorkgroupName parameter is available. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.71.0](service/sagemaker/CHANGELOG.md#v1710-2023-03-09) + * **Feature**: Amazon SageMaker Inference now allows SSM access to customer's model container by setting the "EnableSSMAccess" parameter for a ProductionVariant in CreateEndpointConfig API. +* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.20.0](service/servicediscovery/CHANGELOG.md#v1200-2023-03-09) + * **Feature**: Updated all AWS Cloud Map APIs to provide consistent throttling exception (RequestLimitExceeded) +* `github.com/aws/aws-sdk-go-v2/service/sesv2`: [v1.17.0](service/sesv2/CHANGELOG.md#v1170-2023-03-09) + * **Feature**: This release introduces a new recommendation in Virtual Deliverability Manager Advisor, which detects missing or misconfigured Brand Indicator for Message Identification (BIMI) DNS records for customer sending identities. 
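+
+A minimal, hedged Go sketch for the `feature/ec2/imds` v1.13.0 entry in the 2023-03-14 release above: turning off the IMDSv1 fallback so calls fail rather than silently downgrading. The assumption that the new flag surfaces as the `EnableFallback` option (an `aws.Ternary`) is mine, not quoted from the release note.
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+)
+
+func main() {
+	// Disable falling back to IMDSv1 when the IMDSv2 token flow fails (assumed option name).
+	client := imds.New(imds.Options{
+		EnableFallback: aws.FalseTernary,
+	})
+
+	out, err := client.GetRegion(context.TODO(), &imds.GetRegionInput{})
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println("region:", out.Region)
+}
+```
+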
+ +# Release (2023-03-08) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.23.0](service/athena/CHANGELOG.md#v1230-2023-03-08) + * **Feature**: A new field SubstatementType is added to GetQueryExecution API, so customers have an error-free way to detect the query type and interpret the result. +* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.19.0](service/dynamodb/CHANGELOG.md#v1190-2023-03-08) + * **Feature**: Adds deletion protection support to DynamoDB tables. Tables with deletion protection enabled cannot be deleted. Deletion protection is disabled by default, can be enabled via the CreateTable or UpdateTable APIs, and is visible in TableDescription. This setting is not replicated for Global Tables. (See the Go sketch after the 2023-03-02 release below.) +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.89.0](service/ec2/CHANGELOG.md#v1890-2023-03-08) + * **Feature**: Introducing Amazon EC2 C7g, M7g and R7g instances, powered by the latest generation AWS Graviton3 processors and delivering up to 25% better performance over Graviton2-based instances. +* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.20.0](service/lakeformation/CHANGELOG.md#v1200-2023-03-08) + * **Feature**: This release adds support for two new APIs, "GetDataCellsFilter" and "UpdateDataCellsFilter", and also updates the corresponding documentation. +* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.21.0](service/mediapackage/CHANGELOG.md#v1210-2023-03-08) + * **Feature**: This release provides the date and time live resources were created. +* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.22.0](service/mediapackagevod/CHANGELOG.md#v1220-2023-03-08) + * **Feature**: This release provides the date and time VOD resources were created. +* `github.com/aws/aws-sdk-go-v2/service/route53resolver`: [v1.17.0](service/route53resolver/CHANGELOG.md#v1170-2023-03-08) + * **Feature**: Add dual-stack and IPv6 support for Route 53 Resolver Endpoint, and add IPv6 target IP in Route 53 Resolver Forwarding Rule +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.70.0](service/sagemaker/CHANGELOG.md#v1700-2023-03-08) + * **Feature**: There needs to be a user identity to specify the SageMaker user who performs each action on an entity. However, there is not a unified concept of user identity across SageMaker services that could be used today. + +# Release (2023-03-07) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.24.0](service/databasemigrationservice/CHANGELOG.md#v1240-2023-03-07) + * **Feature**: This release adds DMS Fleet Advisor Target Recommendation APIs and exposes functionality for DMS Fleet Advisor. It adds functionality to start Target Recommendation calculation. +* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.22.1](service/location/CHANGELOG.md#v1221-2023-03-07) + * **Documentation**: Documentation update for the release of 3 additional map styles for use with Open Data Maps: Open Data Standard Dark, Open Data Visualization Light & Open Data Visualization Dark. + +# Release (2023-03-06) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/account`: [v1.10.0](service/account/CHANGELOG.md#v1100-2023-03-06) + * **Feature**: AWS Account alternate contact email addresses can now have a length of 254 characters and contain the character "|". 
+* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.20.6](service/ivs/CHANGELOG.md#v1206-2023-03-06) + * **Documentation**: Updated text description in DeleteChannel, Stream, and StreamSummary. + +# Release (2023-03-03) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.18.6](service/dynamodb/CHANGELOG.md#v1186-2023-03-03) + * **Documentation**: Documentation updates for DynamoDB. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.88.0](service/ec2/CHANGELOG.md#v1880-2023-03-03) + * **Feature**: This release adds support for a new boot mode for EC2 instances called 'UEFI Preferred'. +* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.27.1](service/macie2/CHANGELOG.md#v1271-2023-03-03) + * **Documentation**: Documentation updates for Amazon Macie +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.33.0](service/mediaconvert/CHANGELOG.md#v1330-2023-03-03) + * **Feature**: The AWS Elemental MediaConvert SDK has improved handling for different input and output color space combinations. +* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.30.0](service/medialive/CHANGELOG.md#v1300-2023-03-03) + * **Feature**: AWS Elemental MediaLive adds support for Nielsen watermark timezones. +* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.26.0](service/transcribe/CHANGELOG.md#v1260-2023-03-03) + * **Feature**: Amazon Transcribe now supports role access for these API operations: CreateVocabulary, UpdateVocabulary, CreateVocabularyFilter, and UpdateVocabularyFilter. + +# Release (2023-03-02) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.35.0](service/iot/CHANGELOG.md#v1350-2023-03-02) + * **Feature**: A recurring maintenance window is an optional configuration used for rolling out the job document to all devices in the target group observing a predetermined start time, duration, and frequency that the maintenance window occurs. +* `github.com/aws/aws-sdk-go-v2/service/migrationhubstrategy`: [v1.8.0](service/migrationhubstrategy/CHANGELOG.md#v180-2023-03-02) + * **Feature**: This release updates the File Import API to allow importing servers already discovered by customers with reduced pre-requisites. +* `github.com/aws/aws-sdk-go-v2/service/organizations`: [v1.19.0](service/organizations/CHANGELOG.md#v1190-2023-03-02) + * **Feature**: This release introduces a new reason code, ACCOUNT_CREATION_NOT_COMPLETE, to ConstraintViolationException in CreateOrganization API. +* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.17.0](service/pi/CHANGELOG.md#v1170-2023-03-02) + * **Feature**: This release adds a new field PeriodAlignment to allow the customer specifying the returned timestamp of time periods to be either the start or end time. +* `github.com/aws/aws-sdk-go-v2/service/pipes`: [v1.2.0](service/pipes/CHANGELOG.md#v120-2023-03-02) + * **Feature**: This release fixes some input parameter range and patterns. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.69.0](service/sagemaker/CHANGELOG.md#v1690-2023-03-02) + * **Feature**: Add a new field "EndpointMetrics" in SageMaker Inference Recommender "ListInferenceRecommendationsJobSteps" API response. 
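+
+A minimal, hedged Go sketch for the `dynamodb` v1.19.0 entry in the 2023-03-08 release above: enabling deletion protection on an existing table via UpdateTable. The table name is illustrative; the field follows the DeletionProtectionEnabled setting described in that entry.
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
+)
+
+func main() {
+	cfg, err := config.LoadDefaultConfig(context.TODO())
+	if err != nil {
+		log.Fatal(err)
+	}
+	client := dynamodb.NewFromConfig(cfg)
+
+	// Turn on deletion protection; DeleteTable calls are rejected until it is disabled again.
+	_, err = client.UpdateTable(context.TODO(), &dynamodb.UpdateTableInput{
+		TableName:                 aws.String("orders"), // hypothetical table
+		DeletionProtectionEnabled: aws.Bool(true),
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+```
+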
+ +# Release (2023-03-01) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/codecatalyst`: [v1.2.0](service/codecatalyst/CHANGELOG.md#v120-2023-03-01) + * **Feature**: Published Dev Environments StopDevEnvironmentSession API +* `github.com/aws/aws-sdk-go-v2/service/pricing`: [v1.19.0](service/pricing/CHANGELOG.md#v1190-2023-03-01) + * **Feature**: This release adds 2 new APIs - ListPriceLists which returns a list of applicable price lists, and GetPriceListFileUrl which outputs a URL to retrieve your price lists from the generated file from ListPriceLists +* `github.com/aws/aws-sdk-go-v2/service/s3outposts`: [v1.15.0](service/s3outposts/CHANGELOG.md#v1150-2023-03-01) + * **Feature**: S3 on Outposts introduces a new API ListOutpostsWithS3, with this API you can list all your Outposts with S3 capacity. + +# Release (2023-02-28) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.22.0](service/comprehend/CHANGELOG.md#v1220-2023-02-28) + * **Feature**: Amazon Comprehend now supports flywheels to help you train and manage new model versions for custom models. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.87.0](service/ec2/CHANGELOG.md#v1870-2023-02-28) + * **Feature**: This release allows IMDS support to be set to v2-only on an existing AMI, so that all future instances launched from that AMI will use IMDSv2 by default. +* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.20.6](service/kms/CHANGELOG.md#v1206-2023-02-28) + * **Documentation**: AWS KMS is deprecating the RSAES_PKCS1_V1_5 wrapping algorithm option in the GetParametersForImport API that is used in the AWS KMS Import Key Material feature. AWS KMS will end support for this wrapping algorithm by October 1, 2023. +* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.26.0](service/lightsail/CHANGELOG.md#v1260-2023-02-28) + * **Feature**: This release adds Lightsail for Research feature support, such as GUI session access, cost estimates, stop instance on idle, and disk auto mount. +* `github.com/aws/aws-sdk-go-v2/service/managedblockchain`: [v1.15.0](service/managedblockchain/CHANGELOG.md#v1150-2023-02-28) + * **Feature**: This release adds support for tagging to the accessor resource in Amazon Managed Blockchain +* `github.com/aws/aws-sdk-go-v2/service/omics`: [v1.2.0](service/omics/CHANGELOG.md#v120-2023-02-28) + * **Feature**: Minor model changes to accommodate batch imports feature + +# Release (2023-02-27) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.23.0](service/devopsguru/CHANGELOG.md#v1230-2023-02-27) + * **Feature**: This release adds the description field on ListAnomaliesForInsight and DescribeAnomaly API responses for proactive anomalies. +* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.11.0](service/drs/CHANGELOG.md#v1110-2023-02-27) + * **Feature**: New fields were added to reflect availability zone data in source server and recovery instance description commands responses, as well as source server launch status. 
+* `github.com/aws/aws-sdk-go-v2/service/internetmonitor`: [v1.0.0](service/internetmonitor/CHANGELOG.md#v100-2023-02-27) + * **Release**: New AWS service client module + * **Feature**: CloudWatch Internet Monitor is a new service within CloudWatch that will help application developers and network engineers continuously monitor internet performance metrics such as availability and performance between their AWS-hosted applications and end-users of these applications +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.30.0](service/lambda/CHANGELOG.md#v1300-2023-02-27) + * **Feature**: This release adds the ability to create ESMs with Document DB change streams as an event source. For more information see https://docs.aws.amazon.com/lambda/latest/dg/with-documentdb.html. +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.32.0](service/mediaconvert/CHANGELOG.md#v1320-2023-02-27) + * **Feature**: The AWS Elemental MediaConvert SDK has added support for HDR10 to SDR tone mapping, and animated GIF video input sources. +* `github.com/aws/aws-sdk-go-v2/service/timestreamwrite`: [v1.16.0](service/timestreamwrite/CHANGELOG.md#v1160-2023-02-27) + * **Feature**: This release adds the ability to ingest batched historical data or migrate data in bulk from S3 into Timestream using CSV files. + +# Release (2023-02-24) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.48.0](service/connect/CHANGELOG.md#v1480-2023-02-24) + * **Feature**: StartTaskContact API now supports linked task creation with a new optional RelatedContactId parameter +* `github.com/aws/aws-sdk-go-v2/service/connectcases`: [v1.3.0](service/connectcases/CHANGELOG.md#v130-2023-02-24) + * **Feature**: This release adds the ability to delete domains through the DeleteDomain API. For more information see https://docs.aws.amazon.com/cases/latest/APIReference/Welcome.html +* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.27.5](service/redshift/CHANGELOG.md#v1275-2023-02-24) + * **Documentation**: Documentation updates for Redshift API bringing it in line with IAM best practices. +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.29.0](service/securityhub/CHANGELOG.md#v1290-2023-02-24) + * **Feature**: New Security Hub APIs and updates to existing APIs that help you consolidate control findings and enable and disable controls across all supported standards +* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.16.5](service/servicecatalog/CHANGELOG.md#v1165-2023-02-24) + * **Documentation**: Documentation updates for Service Catalog + +# Release (2023-02-23) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.25.0](service/appflow/CHANGELOG.md#v1250-2023-02-23) + * **Feature**: This release enables customers to choose whether to use Private Link for Metadata and Authorization calls when using private Salesforce connections +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.24.0](service/ecs/CHANGELOG.md#v1240-2023-02-23) + * **Feature**: This release supports deleting Amazon ECS task definitions that are in the INACTIVE state. +* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.12.3](service/grafana/CHANGELOG.md#v1123-2023-02-23) + * **Documentation**: Doc-only update. 
Updated information on attached role policies for customer provided roles +* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.17.6](service/guardduty/CHANGELOG.md#v1176-2023-02-23) + * **Documentation**: Updated API and data types descriptions for CreateFilter, UpdateFilter, and TriggerDetails. +* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.25.0](service/iotwireless/CHANGELOG.md#v1250-2023-02-23) + * **Feature**: In this release, we add additional capabilities for the FUOTA which allows user to configure the fragment size, the sending interval and the redundancy ratio of the FUOTA tasks +* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.22.0](service/location/CHANGELOG.md#v1220-2023-02-23) + * **Feature**: This release adds support for using Maps APIs with an API Key in addition to AWS Cognito. This includes support for adding, listing, updating and deleting API Keys. +* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.27.0](service/macie2/CHANGELOG.md#v1270-2023-02-23) + * **Feature**: This release adds support for a new finding type, Policy:IAMUser/S3BucketSharedWithCloudFront, and S3 bucket metadata that indicates if a bucket is shared with an Amazon CloudFront OAI or OAC. +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.26.0](service/wafv2/CHANGELOG.md#v1260-2023-02-23) + * **Feature**: You can now associate an AWS WAF v2 web ACL with an AWS App Runner service. + +# Release (2023-02-22) + +## General Highlights +* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/chimesdkvoice`: [v1.2.0](service/chimesdkvoice/CHANGELOG.md#v120-2023-02-22) + * **Feature**: This release introduces support for Voice Connector media metrics in the Amazon Chime SDK Voice namespace +* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.26.0](service/cloudfront/CHANGELOG.md#v1260-2023-02-22) + * **Feature**: CloudFront now supports block lists in origin request policies so that you can forward all headers, cookies, or query string from viewer requests to the origin *except* for those specified in the block list. +* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.23.0](service/datasync/CHANGELOG.md#v1230-2023-02-22) + * **Feature**: AWS DataSync has relaxed the minimum length constraint of AccessKey for Object Storage locations to 1. +* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.15.0](service/opensearch/CHANGELOG.md#v1150-2023-02-22) + * **Feature**: This release lets customers configure Off-peak window and software update related properties for a new/existing domain. It enhances the capabilities of StartServiceSoftwareUpdate API; adds 2 new APIs - ListScheduledActions & UpdateScheduledAction; and allows Auto-tune to make use of Off-peak window. +* `github.com/aws/aws-sdk-go-v2/service/rum`: [v1.10.0](service/rum/CHANGELOG.md#v1100-2023-02-22) + * **Feature**: CloudWatch RUM now supports CloudWatch Custom Metrics +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.35.5](service/ssm/CHANGELOG.md#v1355-2023-02-22) + * **Documentation**: Document only update for Feb 2023 + +# Release (2023-02-21) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.32.0](service/quicksight/CHANGELOG.md#v1320-2023-02-21) + * **Feature**: S3 data sources now accept a custom IAM role. 
+* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.9.0](service/resiliencehub/CHANGELOG.md#v190-2023-02-21) + * **Feature**: In this release we improved Resilience Hub application creation and maintenance by introducing new resource and app component CRUD APIs, improving visibility and maintenance of application input sources, and adding support for additional information attributes to be provided by customers. +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.28.4](service/securityhub/CHANGELOG.md#v1284-2023-02-21) + * **Documentation**: Documentation updates for AWS Security Hub +* `github.com/aws/aws-sdk-go-v2/service/tnb`: [v1.0.0](service/tnb/CHANGELOG.md#v100-2023-02-21) + * **Release**: New AWS service client module + * **Feature**: This is the initial SDK release for AWS Telco Network Builder (TNB). AWS Telco Network Builder is a network automation service that helps you deploy and manage telecom networks. + +# Release (2023-02-20) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2`: v1.17.5 + * **Bug Fix**: Fix int overflow bug on 32-bit architecture +* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.24.0](service/auditmanager/CHANGELOG.md#v1240-2023-02-20) + * **Feature**: This release introduces a ServiceQuotaExceededException to the UpdateAssessmentFrameworkShare API operation. +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.47.0](service/connect/CHANGELOG.md#v1470-2023-02-20) + * **Feature**: Reasons for failed diff have been approved by SDK Reviewer + +# Release (2023-02-17) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.17.0](service/apprunner/CHANGELOG.md#v1170-2023-02-17) + * **Feature**: This release supports removing MaxSize limit for AutoScalingConfiguration. +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.43.0](service/glue/CHANGELOG.md#v1430-2023-02-17) + * **Feature**: Release of Delta Lake Data Lake Format for Glue Studio Service + +# Release (2023-02-16) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.23.0](service/emr/CHANGELOG.md#v1230-2023-02-16) + * **Feature**: This release provides customers the ability to define a timeout period for procuring capacity during a resize operation for Instance Fleet clusters. Customers can specify this timeout using the ResizeSpecifications parameter supported by RunJobFlow, ModifyInstanceFleet and AddInstanceFleet APIs. +* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.12.0](service/grafana/CHANGELOG.md#v1120-2023-02-16) + * **Feature**: With this release Amazon Managed Grafana now supports inbound Network Access Control that helps you to restrict user access to your Grafana workspaces +* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.20.3](service/ivs/CHANGELOG.md#v1203-2023-02-16) + * **Documentation**: Doc-only update. Updated text description in DeleteChannel, Stream, and StreamSummary. +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.25.1](service/wafv2/CHANGELOG.md#v1251-2023-02-16) + * **Documentation**: Added a notice for account takeover prevention (ATP). The interface incorrectly lets you configure ATP response inspection in regional web ACLs in Region US East (N. Virginia), without returning an error. ATP response inspection is only available in web ACLs that protect CloudFront distributions.
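The modeled exceptions called out above (for example, Audit Manager's ServiceQuotaExceededException) and the "Correct error type parsing for restJson services" fix that runs through the 2023-02-15 release below both matter to callers for the same reason: typed service errors are matched with Go's `errors.As`. A minimal sketch of that pattern, assuming default credentials and using the API Gateway client with a placeholder REST API ID chosen purely to provoke an error:

```go
package main

import (
	"context"
	"errors"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/apigateway"
	"github.com/aws/aws-sdk-go-v2/service/apigateway/types"
	"github.com/aws/smithy-go"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatalf("load config: %v", err)
	}
	client := apigateway.NewFromConfig(cfg)

	// "does-not-exist" is a placeholder REST API ID used only to trigger a service error.
	_, err = client.GetRestApi(ctx, &apigateway.GetRestApiInput{
		RestApiId: aws.String("does-not-exist"),
	})
	if err != nil {
		// When the error payload is parsed into the correct type, the modeled
		// exception can be matched directly.
		var nfe *types.NotFoundException
		if errors.As(err, &nfe) {
			log.Printf("REST API not found: %s", aws.ToString(nfe.Message))
			return
		}
		// Fallback: any service error still exposes its code and message.
		var apiErr smithy.APIError
		if errors.As(err, &apiErr) {
			log.Printf("service error %s: %s", apiErr.ErrorCode(), apiErr.ErrorMessage())
			return
		}
		log.Fatalf("request failed: %v", err)
	}
}
```

If the error body were parsed into the wrong type, the `errors.As` match against the modeled exception would fail and callers would only reach the generic `smithy.APIError` branch, which is the behavior the restJson parsing fix below addresses.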
+ +# Release (2023-02-15) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.19.3](service/accessanalyzer/CHANGELOG.md#v1193-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/account`: [v1.9.1](service/account/CHANGELOG.md#v191-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/acm`: [v1.17.3](service/acm/CHANGELOG.md#v1173-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.21.2](service/acmpca/CHANGELOG.md#v1212-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/alexaforbusiness`: [v1.15.2](service/alexaforbusiness/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/amp`: [v1.16.2](service/amp/CHANGELOG.md#v1162-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.13.2](service/amplify/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/amplifybackend`: [v1.14.2](service/amplifybackend/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/amplifyuibuilder`: [v1.9.2](service/amplifyuibuilder/CHANGELOG.md#v192-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.16.3](service/apigateway/CHANGELOG.md#v1163-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/apigatewaymanagementapi`: [v1.11.2](service/apigatewaymanagementapi/CHANGELOG.md#v1112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/apigatewayv2`: [v1.13.3](service/apigatewayv2/CHANGELOG.md#v1133-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/appconfig`: [v1.17.1](service/appconfig/CHANGELOG.md#v1171-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/appconfigdata`: [v1.6.1](service/appconfigdata/CHANGELOG.md#v161-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.24.2](service/appflow/CHANGELOG.md#v1242-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/appintegrations`: [v1.14.2](service/appintegrations/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.17.3](service/applicationautoscaling/CHANGELOG.md#v1173-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/applicationcostprofiler`: [v1.10.2](service/applicationcostprofiler/CHANGELOG.md#v1102-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/applicationdiscoveryservice`: [v1.15.2](service/applicationdiscoveryservice/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/applicationinsights`: [v1.17.3](service/applicationinsights/CHANGELOG.md#v1173-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.17.2](service/appmesh/CHANGELOG.md#v1172-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.16.2](service/apprunner/CHANGELOG.md#v1162-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.20.2](service/appstream/CHANGELOG.md#v1202-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.19.2](service/appsync/CHANGELOG.md#v1192-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/arczonalshift`: [v1.1.3](service/arczonalshift/CHANGELOG.md#v113-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.22.2](service/athena/CHANGELOG.md#v1222-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.23.2](service/auditmanager/CHANGELOG.md#v1232-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/autoscalingplans`: [v1.13.2](service/autoscalingplans/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.20.1](service/backup/CHANGELOG.md#v1201-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/backupgateway`: [v1.9.2](service/backupgateway/CHANGELOG.md#v192-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/backupstorage`: [v1.1.2](service/backupstorage/CHANGELOG.md#v112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.21.3](service/batch/CHANGELOG.md#v1213-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/billingconductor`: [v1.5.2](service/billingconductor/CHANGELOG.md#v152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. 
This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.17.2](service/braket/CHANGELOG.md#v1172-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/budgets`: [v1.14.2](service/budgets/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.22.2](service/chime/CHANGELOG.md#v1222-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/chimesdkidentity`: [v1.10.2](service/chimesdkidentity/CHANGELOG.md#v1102-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines`: [v1.2.2](service/chimesdkmediapipelines/CHANGELOG.md#v122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.14.3](service/chimesdkmeetings/CHANGELOG.md#v1143-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.12.2](service/chimesdkmessaging/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/chimesdkvoice`: [v1.1.2](service/chimesdkvoice/CHANGELOG.md#v112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/cleanrooms`: [v1.0.2](service/cleanrooms/CHANGELOG.md#v102-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/cloud9`: [v1.17.2](service/cloud9/CHANGELOG.md#v1172-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/cloudcontrol`: [v1.11.3](service/cloudcontrol/CHANGELOG.md#v1113-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/clouddirectory`: [v1.13.2](service/clouddirectory/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/cloudhsm`: [v1.13.2](service/cloudhsm/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/cloudhsmv2`: [v1.14.2](service/cloudhsmv2/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/cloudsearchdomain`: [v1.12.2](service/cloudsearchdomain/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.24.0](service/cloudtrail/CHANGELOG.md#v1240-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Feature**: This release adds an InsufficientEncryptionPolicyException type to the StartImport endpoint + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/cloudtraildata`: [v1.0.2](service/cloudtraildata/CHANGELOG.md#v102-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/cloudwatchevents`: [v1.15.3](service/cloudwatchevents/CHANGELOG.md#v1153-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.20.3](service/cloudwatchlogs/CHANGELOG.md#v1203-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.16.2](service/codeartifact/CHANGELOG.md#v1162-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.20.3](service/codebuild/CHANGELOG.md#v1203-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/codecatalyst`: [v1.1.2](service/codecatalyst/CHANGELOG.md#v112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/codecommit`: [v1.14.2](service/codecommit/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/codedeploy`: [v1.16.3](service/codedeploy/CHANGELOG.md#v1163-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/codeguruprofiler`: [v1.13.2](service/codeguruprofiler/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/codegurureviewer`: [v1.17.2](service/codegurureviewer/CHANGELOG.md#v1172-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/codepipeline`: [v1.14.2](service/codepipeline/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/codestar`: [v1.13.2](service/codestar/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/codestarconnections`: [v1.14.2](service/codestarconnections/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/codestarnotifications`: [v1.14.2](service/codestarnotifications/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/cognitoidentity`: [v1.15.2](service/cognitoidentity/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.22.2](service/cognitoidentityprovider/CHANGELOG.md#v1222-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/cognitosync`: [v1.12.2](service/cognitosync/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.21.2](service/comprehend/CHANGELOG.md#v1212-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/comprehendmedical`: [v1.15.2](service/comprehendmedical/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.21.1](service/computeoptimizer/CHANGELOG.md#v1211-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.29.3](service/configservice/CHANGELOG.md#v1293-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.46.1](service/connect/CHANGELOG.md#v1461-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/connectcampaigns`: [v1.2.3](service/connectcampaigns/CHANGELOG.md#v123-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/connectcases`: [v1.2.3](service/connectcases/CHANGELOG.md#v123-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/connectcontactlens`: [v1.13.2](service/connectcontactlens/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/connectparticipant`: [v1.15.2](service/connectparticipant/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/controltower`: [v1.1.2](service/controltower/CHANGELOG.md#v112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/costandusagereportservice`: [v1.15.2](service/costandusagereportservice/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.25.2](service/costexplorer/CHANGELOG.md#v1252-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.23.1](service/customerprofiles/CHANGELOG.md#v1231-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.23.3](service/databasemigrationservice/CHANGELOG.md#v1233-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.21.3](service/databrew/CHANGELOG.md#v1213-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.18.2](service/dataexchange/CHANGELOG.md#v1182-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/datapipeline`: [v1.14.2](service/datapipeline/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.22.1](service/datasync/CHANGELOG.md#v1221-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/dax`: [v1.12.2](service/dax/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/detective`: [v1.18.2](service/detective/CHANGELOG.md#v1182-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/devicefarm`: [v1.15.2](service/devicefarm/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.22.2](service/devopsguru/CHANGELOG.md#v1222-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.18.3](service/directconnect/CHANGELOG.md#v1183-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/directoryservice`: [v1.16.3](service/directoryservice/CHANGELOG.md#v1163-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. 
This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/dlm`: [v1.14.4](service/dlm/CHANGELOG.md#v1144-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/docdbelastic`: [v1.1.2](service/docdbelastic/CHANGELOG.md#v112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.10.2](service/drs/CHANGELOG.md#v1102-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.18.3](service/dynamodb/CHANGELOG.md#v1183-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/dynamodbstreams`: [v1.14.3](service/dynamodbstreams/CHANGELOG.md#v1143-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/ebs`: [v1.16.4](service/ebs/CHANGELOG.md#v1164-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/ec2instanceconnect`: [v1.15.2](service/ec2instanceconnect/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.18.3](service/ecr/CHANGELOG.md#v1183-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/ecrpublic`: [v1.15.2](service/ecrpublic/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.23.3](service/ecs/CHANGELOG.md#v1233-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.19.4](service/efs/CHANGELOG.md#v1194-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. + * **Documentation**: Documentation update for EFS to support IAM best practices. +* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.27.3](service/eks/CHANGELOG.md#v1273-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/elasticinference`: [v1.12.2](service/elasticinference/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.18.3](service/elasticsearchservice/CHANGELOG.md#v1183-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/elastictranscoder`: [v1.14.2](service/elastictranscoder/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.22.3](service/emr/CHANGELOG.md#v1223-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.17.1](service/emrcontainers/CHANGELOG.md#v1171-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/emrserverless`: [v1.5.2](service/emrserverless/CHANGELOG.md#v152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.18.3](service/eventbridge/CHANGELOG.md#v1183-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.10.2](service/evidently/CHANGELOG.md#v1102-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/finspace`: [v1.9.2](service/finspace/CHANGELOG.md#v192-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.14.2](service/finspacedata/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/firehose`: [v1.16.3](service/firehose/CHANGELOG.md#v1163-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/fis`: [v1.14.2](service/fis/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.22.3](service/fms/CHANGELOG.md#v1223-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. 
This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.25.2](service/forecast/CHANGELOG.md#v1252-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/forecastquery`: [v1.13.2](service/forecastquery/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.23.0](service/frauddetector/CHANGELOG.md#v1230-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Feature**: This release introduces the Lists feature which allows customers to reference a set of values in Fraud Detector's rules. With Lists, customers can dynamically manage these attributes in real time. Lists can be created/deleted and their contents can be modified using the Fraud Detector API. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.28.3](service/fsx/CHANGELOG.md#v1283-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.17.2](service/gamelift/CHANGELOG.md#v1172-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/gamesparks`: [v1.2.2](service/gamesparks/CHANGELOG.md#v122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/glacier`: [v1.14.3](service/glacier/CHANGELOG.md#v1143-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/globalaccelerator`: [v1.16.2](service/globalaccelerator/CHANGELOG.md#v1162-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response.
This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.42.0](service/glue/CHANGELOG.md#v1420-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Feature**: Fix DirectJDBCSource not showing up in CLI code gen + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.11.2](service/grafana/CHANGELOG.md#v1112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/greengrass`: [v1.15.3](service/greengrass/CHANGELOG.md#v1153-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.21.3](service/greengrassv2/CHANGELOG.md#v1213-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.17.2](service/groundstation/CHANGELOG.md#v1172-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.17.3](service/guardduty/CHANGELOG.md#v1173-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/health`: [v1.16.2](service/health/CHANGELOG.md#v1162-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/healthlake`: [v1.15.2](service/healthlake/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/honeycode`: [v1.13.2](service/honeycode/CHANGELOG.md#v1132-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.16.2](service/identitystore/CHANGELOG.md#v1162-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.22.2](service/imagebuilder/CHANGELOG.md#v1222-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/inspector`: [v1.13.2](service/inspector/CHANGELOG.md#v1132-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.11.3](service/inspector2/CHANGELOG.md#v1113-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.34.2](service/iot/CHANGELOG.md#v1342-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/iot1clickdevicesservice`: [v1.11.2](service/iot1clickdevicesservice/CHANGELOG.md#v1112-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/iot1clickprojects`: [v1.12.2](service/iot1clickprojects/CHANGELOG.md#v1122-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/iotanalytics`: [v1.14.2](service/iotanalytics/CHANGELOG.md#v1142-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/iotdataplane`: [v1.14.2](service/iotdataplane/CHANGELOG.md#v1142-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/iotdeviceadvisor`: [v1.17.2](service/iotdeviceadvisor/CHANGELOG.md#v1172-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/iotevents`: [v1.15.2](service/iotevents/CHANGELOG.md#v1152-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/ioteventsdata`: [v1.13.2](service/ioteventsdata/CHANGELOG.md#v1132-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/iotfleethub`: [v1.13.2](service/iotfleethub/CHANGELOG.md#v1132-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/iotfleetwise`: [v1.3.2](service/iotfleetwise/CHANGELOG.md#v132-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/iotjobsdataplane`: [v1.12.2](service/iotjobsdataplane/CHANGELOG.md#v1122-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/iotroborunner`: [v1.1.2](service/iotroborunner/CHANGELOG.md#v112-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/iotsecuretunneling`: [v1.15.2](service/iotsecuretunneling/CHANGELOG.md#v1152-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.27.2](service/iotsitewise/CHANGELOG.md#v1272-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/iotthingsgraph`: [v1.14.2](service/iotthingsgraph/CHANGELOG.md#v1142-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/iottwinmaker`: [v1.10.2](service/iottwinmaker/CHANGELOG.md#v1102-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.24.2](service/iotwireless/CHANGELOG.md#v1242-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.20.2](service/ivs/CHANGELOG.md#v1202-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.3.2](service/ivschat/CHANGELOG.md#v132-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.19.2](service/kafka/CHANGELOG.md#v1192-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/kafkaconnect`: [v1.9.2](service/kafkaconnect/CHANGELOG.md#v192-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.38.3](service/kendra/CHANGELOG.md#v1383-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/kendraranking`: [v1.0.4](service/kendraranking/CHANGELOG.md#v104-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/keyspaces`: [v1.1.2](service/keyspaces/CHANGELOG.md#v112-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.17.4](service/kinesis/CHANGELOG.md#v1174-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/kinesisanalytics`: [v1.14.2](service/kinesisanalytics/CHANGELOG.md#v1142-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2`: [v1.16.2](service/kinesisanalyticsv2/CHANGELOG.md#v1162-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/kinesisvideo`: [v1.15.3](service/kinesisvideo/CHANGELOG.md#v1153-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/kinesisvideoarchivedmedia`: [v1.14.3](service/kinesisvideoarchivedmedia/CHANGELOG.md#v1143-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/kinesisvideomedia`: [v1.11.3](service/kinesisvideomedia/CHANGELOG.md#v1113-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/kinesisvideosignaling`: [v1.11.3](service/kinesisvideosignaling/CHANGELOG.md#v1113-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/kinesisvideowebrtcstorage`: [v1.2.3](service/kinesisvideowebrtcstorage/CHANGELOG.md#v123-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.20.3](service/kms/CHANGELOG.md#v1203-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.19.2](service/lakeformation/CHANGELOG.md#v1192-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.29.2](service/lambda/CHANGELOG.md#v1292-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/lexmodelbuildingservice`: [v1.17.2](service/lexmodelbuildingservice/CHANGELOG.md#v1172-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.28.1](service/lexmodelsv2/CHANGELOG.md#v1281-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/lexruntimeservice`: [v1.13.2](service/lexruntimeservice/CHANGELOG.md#v1132-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.17.1](service/lexruntimev2/CHANGELOG.md#v1171-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.17.2](service/licensemanager/CHANGELOG.md#v1172-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/licensemanagerlinuxsubscriptions`: [v1.1.2](service/licensemanagerlinuxsubscriptions/CHANGELOG.md#v112-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/licensemanagerusersubscriptions`: [v1.2.2](service/licensemanagerusersubscriptions/CHANGELOG.md#v122-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.25.3](service/lightsail/CHANGELOG.md#v1253-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.21.2](service/location/CHANGELOG.md#v1212-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/lookoutequipment`: [v1.17.2](service/lookoutequipment/CHANGELOG.md#v1172-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.19.2](service/lookoutmetrics/CHANGELOG.md#v1192-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/lookoutvision`: [v1.15.2](service/lookoutvision/CHANGELOG.md#v1152-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/m2`: [v1.4.2](service/m2/CHANGELOG.md#v142-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/machinelearning`: [v1.15.2](service/machinelearning/CHANGELOG.md#v1152-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/macie`: [v1.15.2](service/macie/CHANGELOG.md#v1152-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.26.2](service/macie2/CHANGELOG.md#v1262-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/managedblockchain`: [v1.14.2](service/managedblockchain/CHANGELOG.md#v1142-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/marketplacecatalog`: [v1.15.2](service/marketplacecatalog/CHANGELOG.md#v1152-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/marketplacecommerceanalytics`: [v1.12.2](service/marketplacecommerceanalytics/CHANGELOG.md#v1122-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/marketplaceentitlementservice`: [v1.12.2](service/marketplaceentitlementservice/CHANGELOG.md#v1122-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/marketplacemetering`: [v1.14.3](service/marketplacemetering/CHANGELOG.md#v1143-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/mediaconnect`: [v1.18.2](service/mediaconnect/CHANGELOG.md#v1182-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.31.1](service/mediaconvert/CHANGELOG.md#v1311-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.29.2](service/medialive/CHANGELOG.md#v1292-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.20.2](service/mediapackage/CHANGELOG.md#v1202-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.21.2](service/mediapackagevod/CHANGELOG.md#v1212-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/mediastore`: [v1.13.2](service/mediastore/CHANGELOG.md#v1132-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/mediastoredata`: [v1.13.2](service/mediastoredata/CHANGELOG.md#v1132-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.22.2](service/mediatailor/CHANGELOG.md#v1222-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/memorydb`: [v1.12.2](service/memorydb/CHANGELOG.md#v1122-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.17.2](service/mgn/CHANGELOG.md#v1172-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/migrationhub`: [v1.13.2](service/migrationhub/CHANGELOG.md#v1132-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/migrationhubconfig`: [v1.13.2](service/migrationhubconfig/CHANGELOG.md#v1132-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/migrationhuborchestrator`: [v1.1.2](service/migrationhuborchestrator/CHANGELOG.md#v112-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/migrationhubrefactorspaces`: [v1.9.1](service/migrationhubrefactorspaces/CHANGELOG.md#v191-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/migrationhubstrategy`: [v1.7.2](service/migrationhubstrategy/CHANGELOG.md#v172-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/mobile`: [v1.12.2](service/mobile/CHANGELOG.md#v1122-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/mq`: [v1.14.2](service/mq/CHANGELOG.md#v1142-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/mturk`: [v1.14.2](service/mturk/CHANGELOG.md#v1142-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/mwaa`: [v1.14.2](service/mwaa/CHANGELOG.md#v1142-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.24.2](service/networkfirewall/CHANGELOG.md#v1242-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.17.2](service/networkmanager/CHANGELOG.md#v1172-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.16.2](service/nimble/CHANGELOG.md#v1162-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/oam`: [v1.1.3](service/oam/CHANGELOG.md#v113-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/omics`: [v1.1.2](service/omics/CHANGELOG.md#v112-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.14.2](service/opensearch/CHANGELOG.md#v1142-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/opensearchserverless`: [v1.1.3](service/opensearchserverless/CHANGELOG.md#v113-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/opsworks`: [v1.14.2](service/opsworks/CHANGELOG.md#v1142-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/opsworkscm`: [v1.15.2](service/opsworkscm/CHANGELOG.md#v1152-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/organizations`: [v1.18.2](service/organizations/CHANGELOG.md#v1182-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.27.2](service/outposts/CHANGELOG.md#v1272-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/panorama`: [v1.11.2](service/panorama/CHANGELOG.md#v1112-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.23.2](service/personalize/CHANGELOG.md#v1232-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/personalizeevents`: [v1.13.2](service/personalizeevents/CHANGELOG.md#v1132-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/personalizeruntime`: [v1.13.2](service/personalizeruntime/CHANGELOG.md#v1132-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.16.3](service/pi/CHANGELOG.md#v1163-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.18.2](service/pinpoint/CHANGELOG.md#v1182-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/pinpointemail`: [v1.12.2](service/pinpointemail/CHANGELOG.md#v1122-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoice`: [v1.11.2](service/pinpointsmsvoice/CHANGELOG.md#v1112-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoicev2`: [v1.1.2](service/pinpointsmsvoicev2/CHANGELOG.md#v112-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/pipes`: [v1.1.2](service/pipes/CHANGELOG.md#v112-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.25.1](service/polly/CHANGELOG.md#v1251-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/pricing`: [v1.18.2](service/pricing/CHANGELOG.md#v1182-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/privatenetworks`: [v1.2.0](service/privatenetworks/CHANGELOG.md#v120-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Feature**: This release introduces a new StartNetworkResourceUpdate API, which enables return/replacement of hardware from a NetworkSite.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.20.1](service/proton/CHANGELOG.md#v1201-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/qldb`: [v1.15.2](service/qldb/CHANGELOG.md#v1152-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/qldbsession`: [v1.14.2](service/qldbsession/CHANGELOG.md#v1142-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.31.2](service/quicksight/CHANGELOG.md#v1312-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/ram`: [v1.17.3](service/ram/CHANGELOG.md#v1173-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/rbin`: [v1.8.3](service/rbin/CHANGELOG.md#v183-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.40.3](service/rds/CHANGELOG.md#v1403-2023-02-15)
+  * **Documentation**: Database Activity Stream support for RDS for SQL Server.
+* `github.com/aws/aws-sdk-go-v2/service/rdsdata`: [v1.13.2](service/rdsdata/CHANGELOG.md#v1132-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/redshiftdata`: [v1.18.2](service/redshiftdata/CHANGELOG.md#v1182-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.4.3](service/redshiftserverless/CHANGELOG.md#v143-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.23.2](service/rekognition/CHANGELOG.md#v1232-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.8.2](service/resiliencehub/CHANGELOG.md#v182-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/resourceexplorer2`: [v1.2.3](service/resourceexplorer2/CHANGELOG.md#v123-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/resourcegroups`: [v1.14.3](service/resourcegroups/CHANGELOG.md#v1143-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi`: [v1.14.3](service/resourcegroupstaggingapi/CHANGELOG.md#v1143-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/robomaker`: [v1.18.2](service/robomaker/CHANGELOG.md#v1182-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/rolesanywhere`: [v1.1.2](service/rolesanywhere/CHANGELOG.md#v112-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/route53domains`: [v1.14.2](service/route53domains/CHANGELOG.md#v1142-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/route53recoverycluster`: [v1.11.2](service/route53recoverycluster/CHANGELOG.md#v1112-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig`: [v1.11.2](service/route53recoverycontrolconfig/CHANGELOG.md#v1112-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/route53recoveryreadiness`: [v1.9.2](service/route53recoveryreadiness/CHANGELOG.md#v192-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/route53resolver`: [v1.16.3](service/route53resolver/CHANGELOG.md#v1163-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/rum`: [v1.9.2](service/rum/CHANGELOG.md#v192-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/s3outposts`: [v1.14.2](service/s3outposts/CHANGELOG.md#v1142-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.68.1](service/sagemaker/CHANGELOG.md#v1681-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/sagemakera2iruntime`: [v1.15.2](service/sagemakera2iruntime/CHANGELOG.md#v1152-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/sagemakeredge`: [v1.13.2](service/sagemakeredge/CHANGELOG.md#v1132-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/sagemakerfeaturestoreruntime`: [v1.13.2](service/sagemakerfeaturestoreruntime/CHANGELOG.md#v1132-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/sagemakergeospatial`: [v1.1.2](service/sagemakergeospatial/CHANGELOG.md#v112-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/sagemakermetrics`: [v1.0.5](service/sagemakermetrics/CHANGELOG.md#v105-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.18.3](service/sagemakerruntime/CHANGELOG.md#v1183-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/savingsplans`: [v1.12.2](service/savingsplans/CHANGELOG.md#v1122-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/scheduler`: [v1.1.2](service/scheduler/CHANGELOG.md#v112-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/schemas`: [v1.15.2](service/schemas/CHANGELOG.md#v1152-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.18.4](service/secretsmanager/CHANGELOG.md#v1184-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.28.2](service/securityhub/CHANGELOG.md#v1282-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/securitylake`: [v1.2.2](service/securitylake/CHANGELOG.md#v122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository`: [v1.12.2](service/serverlessapplicationrepository/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.16.2](service/servicecatalog/CHANGELOG.md#v1162-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.16.3](service/servicecatalogappregistry/CHANGELOG.md#v1163-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.19.2](service/servicediscovery/CHANGELOG.md#v1192-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/servicequotas`: [v1.14.3](service/servicequotas/CHANGELOG.md#v1143-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/sesv2`: [v1.16.2](service/sesv2/CHANGELOG.md#v1162-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/sfn`: [v1.17.3](service/sfn/CHANGELOG.md#v1173-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/shield`: [v1.18.2](service/shield/CHANGELOG.md#v1182-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/signer`: [v1.14.2](service/signer/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/simspaceweaver`: [v1.1.2](service/simspaceweaver/CHANGELOG.md#v112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/sms`: [v1.13.2](service/sms/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/snowball`: [v1.18.1](service/snowball/CHANGELOG.md#v1181-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/snowdevicemanagement`: [v1.9.2](service/snowdevicemanagement/CHANGELOG.md#v192-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.35.3](service/ssm/CHANGELOG.md#v1353-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/ssmcontacts`: [v1.14.2](service/ssmcontacts/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. 
+ * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.20.2](service/ssmincidents/CHANGELOG.md#v1202-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/ssmsap`: [v1.2.2](service/ssmsap/CHANGELOG.md#v122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/sso`: [v1.12.2](service/sso/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/ssoadmin`: [v1.16.2](service/ssoadmin/CHANGELOG.md#v1162-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.14.2](service/ssooidc/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.18.3](service/storagegateway/CHANGELOG.md#v1183-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/support`: [v1.14.2](service/support/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/supportapp`: [v1.2.2](service/supportapp/CHANGELOG.md#v122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/swf`: [v1.14.4](service/swf/CHANGELOG.md#v1144-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. 
This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.17.3](service/synthetics/CHANGELOG.md#v1173-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.20.2](service/textract/CHANGELOG.md#v1202-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/timestreamquery`: [v1.15.2](service/timestreamquery/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/timestreamwrite`: [v1.15.2](service/timestreamwrite/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.25.2](service/transcribe/CHANGELOG.md#v1252-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.9.2](service/transcribestreaming/CHANGELOG.md#v192-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.28.3](service/transfer/CHANGELOG.md#v1283-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/translate`: [v1.17.2](service/translate/CHANGELOG.md#v1172-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/voiceid`: [v1.12.2](service/voiceid/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/waf`: [v1.12.2](service/waf/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/wafregional`: [v1.13.3](service/wafregional/CHANGELOG.md#v1133-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.25.0](service/wafv2/CHANGELOG.md#v1250-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Feature**: For protected CloudFront distributions, you can now use the AWS WAF Fraud Control account takeover prevention (ATP) managed rule group to block new login attempts from clients that have recently submitted too many failed login attempts. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.18.2](service/wellarchitected/CHANGELOG.md#v1182-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.12.2](service/wisdom/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/workdocs`: [v1.13.3](service/workdocs/CHANGELOG.md#v1133-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/worklink`: [v1.13.2](service/worklink/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/workmail`: [v1.18.2](service/workmail/CHANGELOG.md#v1182-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/workmailmessageflow`: [v1.12.2](service/workmailmessageflow/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.28.3](service/workspaces/CHANGELOG.md#v1283-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/workspacesweb`: [v1.9.2](service/workspacesweb/CHANGELOG.md#v192-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/xray`: [v1.16.3](service/xray/CHANGELOG.md#v1163-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. + +# Release (2023-02-14) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/appconfig`: [v1.17.0](service/appconfig/CHANGELOG.md#v1170-2023-02-14) + * **Feature**: AWS AppConfig now offers the option to set a version label on hosted configuration versions. Version labels allow you to identify specific hosted configuration versions based on an alternate versioning scheme that you define. +* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.22.0](service/datasync/CHANGELOG.md#v1220-2023-02-14) + * **Feature**: With this launch, we are giving customers the ability to use older SMB protocol versions, enabling them to use DataSync to copy data to and from their legacy storage arrays. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.86.0](service/ec2/CHANGELOG.md#v1860-2023-02-14) + * **Feature**: With this release customers can turn host maintenance on or off when allocating or modifying a supported dedicated host. Host maintenance is turned on by default for supported hosts. + +# Release (2023-02-13) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/account`: [v1.9.0](service/account/CHANGELOG.md#v190-2023-02-13) + * **Feature**: This release of the Account Management API enables customers to view and manage whether AWS Opt-In Regions are enabled or disabled for their Account. 
For more information, see https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-regions.html +* `github.com/aws/aws-sdk-go-v2/service/appconfigdata`: [v1.6.0](service/appconfigdata/CHANGELOG.md#v160-2023-02-13) + * **Feature**: AWS AppConfig now offers the option to set a version label on hosted configuration versions. If a labeled hosted configuration version is deployed, its version label is available in the GetLatestConfiguration response. +* `github.com/aws/aws-sdk-go-v2/service/snowball`: [v1.18.0](service/snowball/CHANGELOG.md#v1180-2023-02-13) + * **Feature**: Adds support for EKS Anywhere on Snowball. AWS Snow Family customers can now install EKS Anywhere service on Snowball Edge Compute Optimized devices. + +# Release (2023-02-10) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.27.0](service/autoscaling/CHANGELOG.md#v1270-2023-02-10) + * **Feature**: You can now either terminate/replace, ignore, or wait for EC2 Auto Scaling instances on standby or protected from scale in. You can also roll back changes from a failed instance refresh. +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.46.0](service/connect/CHANGELOG.md#v1460-2023-02-10) + * **Feature**: This update provides the Wisdom session ARN for contacts enabled for Wisdom in the chat channel. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.85.0](service/ec2/CHANGELOG.md#v1850-2023-02-10) + * **Feature**: Adds support for waiters that automatically poll for an imported snapshot until it reaches the completed state. +* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.25.0](service/polly/CHANGELOG.md#v1250-2023-02-10) + * **Feature**: Amazon Polly adds two new neural Japanese voices - Kazuha, Tomoko +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.68.0](service/sagemaker/CHANGELOG.md#v1680-2023-02-10) + * **Feature**: Amazon SageMaker Autopilot adds support for selecting algorithms in CreateAutoMLJob API. +* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.20.2](service/sns/CHANGELOG.md#v1202-2023-02-10) + * **Documentation**: This release adds support for SNS X-Ray active tracing as well as other updates. + +# Release (2023-02-09) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.14.2](service/chimesdkmeetings/CHANGELOG.md#v1142-2023-02-09) + * **Documentation**: Documentation updates for Chime Meetings SDK +* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.17.0](service/emrcontainers/CHANGELOG.md#v1170-2023-02-09) + * **Feature**: EMR on EKS allows configuring retry policies for job runs through the StartJobRun API. Using retry policies, a job can cause a driver pod to be restarted automatically if it fails or is deleted. The job's status can be seen in the DescribeJobRun and ListJobRun APIs and monitored using CloudWatch events. +* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.28.0](service/lexmodelsv2/CHANGELOG.md#v1280-2023-02-09) + * **Feature**: AWS Lex now supports Network of Bots. +* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.17.0](service/lexruntimev2/CHANGELOG.md#v1170-2023-02-09) + * **Feature**: AWS Lex now supports Network of Bots. 
+* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.25.2](service/lightsail/CHANGELOG.md#v1252-2023-02-09) + * **Documentation**: Documentation updates for Lightsail +* `github.com/aws/aws-sdk-go-v2/service/migrationhubrefactorspaces`: [v1.9.0](service/migrationhubrefactorspaces/CHANGELOG.md#v190-2023-02-09) + * **Feature**: This release adds support for creating environments with a network fabric type of NONE +* `github.com/aws/aws-sdk-go-v2/service/workdocs`: [v1.13.2](service/workdocs/CHANGELOG.md#v1132-2023-02-09) + * **Documentation**: Doc only update for the WorkDocs APIs. +* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.28.2](service/workspaces/CHANGELOG.md#v1282-2023-02-09) + * **Documentation**: Removed Windows Server 2016 BYOL and made changes based on IAM campaign. + +# Release (2023-02-08) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.20.0](service/backup/CHANGELOG.md#v1200-2023-02-08) + * **Feature**: This release added one attribute (resource name) in the output model of our 9 existing APIs in AWS Backup so that customers will see the resource name in the output. No input is required from customers. +* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.25.0](service/cloudfront/CHANGELOG.md#v1250-2023-02-08) + * **Feature**: CloudFront Origin Access Control extends support to AWS Elemental MediaStore origins. +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.41.0](service/glue/CHANGELOG.md#v1410-2023-02-08) + * **Feature**: DirectJDBCSource + Glue 4.0 streaming options + +# Release (2023-02-07) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.28.2](service/transfer/CHANGELOG.md#v1282-2023-02-07) + * **Documentation**: Updated the documentation for the ImportCertificate API call, and added examples. + +# Release (2023-02-06) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.21.0](service/computeoptimizer/CHANGELOG.md#v1210-2023-02-06) + * **Feature**: AWS Compute Optimizer can now infer if Kafka is running on an instance. +* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.23.0](service/customerprofiles/CHANGELOG.md#v1230-2023-02-06) + * **Feature**: This release deprecates the PartyType and Gender enum data types from the Profile model and replaces them with new PartyTypeString and GenderString attributes, which accept any string of length up to 255. +* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.22.0](service/frauddetector/CHANGELOG.md#v1220-2023-02-06) + * **Feature**: This release introduces Cold Start Model Training, which optimizes training for small datasets and adds intelligent methods for treating unlabeled data. You can now train Online Fraud Insights or Transaction Fraud Insights models with minimal historical data. +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.31.0](service/mediaconvert/CHANGELOG.md#v1310-2023-02-06) + * **Feature**: The AWS Elemental MediaConvert SDK has added improved scene change detection capabilities and a bandwidth reduction filter, along with video quality enhancements, to the AVC encoder. +* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.27.0](service/outposts/CHANGELOG.md#v1270-2023-02-06) + * **Feature**: Adds OrderType to Order structure. Adds PreviousOrderId and PreviousLineItemId to LineItem structure. Adds new line item status REPLACED. Increases maximum length of pagination token. 
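The 2023-02-15 entries near the top of this changelog repeat one announcement for every restJson-based module: an error response could previously be deserialized into the wrong error type. The snippet below is an illustrative sketch only, not taken from the changelog; it shows the usual way a caller of one of the affected modules, Secrets Manager (bumped to v1.18.4 above), branches on modeled error types with `errors.As`, which is the code path the corrected parsing feeds. The secret name is a placeholder.

```go
package main

import (
	"context"
	"errors"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/secretsmanager"
	"github.com/aws/aws-sdk-go-v2/service/secretsmanager/types"
	"github.com/aws/smithy-go"
)

func main() {
	ctx := context.Background()

	// Standard credential/region resolution.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := secretsmanager.NewFromConfig(cfg)

	// "example-secret" is a placeholder; the call only exists to produce a service error.
	_, err = client.GetSecretValue(ctx, &secretsmanager.GetSecretValueInput{
		SecretId: aws.String("example-secret"),
	})
	if err == nil {
		return
	}

	// With the parsing fix, a missing secret surfaces as the modeled error type.
	var notFound *types.ResourceNotFoundException
	if errors.As(err, &notFound) {
		log.Printf("secret not found: %s", notFound.ErrorMessage())
		return
	}

	// Any other service error still exposes its code through the generic interface.
	var apiErr smithy.APIError
	if errors.As(err, &apiErr) {
		log.Printf("service error %s: %s", apiErr.ErrorCode(), apiErr.ErrorMessage())
		return
	}
	log.Fatal(err)
}
```

Before the fix, the `errors.As` branch for the modeled type could miss for restJson services, so code written in this style is the main beneficiary of the change.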
+ +# Release (2023-02-03) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.26.2](service/autoscaling/CHANGELOG.md#v1262-2023-02-03) + * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. +* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.26.2](service/cloudformation/CHANGELOG.md#v1262-2023-02-03) + * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. +* `github.com/aws/aws-sdk-go-v2/service/cloudsearch`: [v1.14.1](service/cloudsearch/CHANGELOG.md#v1141-2023-02-03) + * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. +* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.25.2](service/cloudwatch/CHANGELOG.md#v1252-2023-02-03) + * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. +* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.20.2](service/docdb/CHANGELOG.md#v1202-2023-02-03) + * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.84.1](service/ec2/CHANGELOG.md#v1841-2023-02-03) + * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. +* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.26.2](service/elasticache/CHANGELOG.md#v1262-2023-02-03) + * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. +* `github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk`: [v1.15.1](service/elasticbeanstalk/CHANGELOG.md#v1151-2023-02-03) + * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. +* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing`: [v1.15.2](service/elasticloadbalancing/CHANGELOG.md#v1152-2023-02-03) + * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. +* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.19.3](service/elasticloadbalancingv2/CHANGELOG.md#v1193-2023-02-03) + * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. +* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.19.2](service/iam/CHANGELOG.md#v1192-2023-02-03) + * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. +* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.19.2](service/neptune/CHANGELOG.md#v1192-2023-02-03) + * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. +* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.20.0](service/proton/CHANGELOG.md#v1200-2023-02-03) + * **Feature**: Add new GetResourcesSummary API +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.40.2](service/rds/CHANGELOG.md#v1402-2023-02-03) + * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. +* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.27.2](service/redshift/CHANGELOG.md#v1272-2023-02-03) + * **Documentation**: Corrects descriptions of the parameters for the API operations RestoreFromClusterSnapshot, RestoreTableFromClusterSnapshot, and CreateCluster. + * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. 
+* `github.com/aws/aws-sdk-go-v2/service/ses`: [v1.15.1](service/ses/CHANGELOG.md#v1151-2023-02-03) + * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. +* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.20.1](service/sns/CHANGELOG.md#v1201-2023-02-03) + * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. +* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.20.2](service/sqs/CHANGELOG.md#v1202-2023-02-03) + * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. +* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.18.3](service/sts/CHANGELOG.md#v1183-2023-02-03) + * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. + +# Release (2023-02-02) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/appconfig`: [v1.16.0](service/appconfig/CHANGELOG.md#v1160-2023-02-02) + * **Feature**: AWS AppConfig introduces KMS customer-managed key (CMK) encryption of configuration data, along with AWS Secrets Manager as a new configuration data source. S3 objects using SSE-KMS encryption and SSM Parameter Store SecureStrings are also now supported. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.84.0](service/ec2/CHANGELOG.md#v1840-2023-02-02) + * **Feature**: Documentation updates for EC2. +* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.19.2](service/elasticloadbalancingv2/CHANGELOG.md#v1192-2023-02-02) + * **Documentation**: The GWLB Flex Health Check project updates the default values of healthy-threshold-count from 3 to 5 and unhealthy-threshold-count from 3 to 2 +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.31.0](service/quicksight/CHANGELOG.md#v1310-2023-02-02) + * **Feature**: QuickSight support for Radar Chart and Dashboard Publish Options + +# Release (2023-02-01) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.22.0](service/devopsguru/CHANGELOG.md#v1220-2023-02-01) + * **Feature**: This release adds filter support for the ListAnomalyForInsight API. +* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.25.0](service/forecast/CHANGELOG.md#v1250-2023-02-01) + * **Feature**: This release enables customers to select INCREMENTAL as ImportModel in Forecast's CreateDatasetImportJob API. +* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.19.1](service/iam/CHANGELOG.md#v1191-2023-02-01) + * **Documentation**: Documentation updates for AWS Identity and Access Management (IAM). +* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.22.0](service/mediatailor/CHANGELOG.md#v1220-2023-02-01) + * **Feature**: The AWS Elemental MediaTailor SDK for Channel Assembly has added support for program updates, and the ability to clip the end of VOD sources in programs. +* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.20.0](service/sns/CHANGELOG.md#v1200-2023-02-01) + * **Feature**: Additional attributes added for set-topic-attributes. + +# Release (2023-01-31) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.19.0](service/appsync/CHANGELOG.md#v1190-2023-01-31) + * **Feature**: This release introduces support for EventBridge as an AppSync data source. 
+* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.23.0](service/cloudtrail/CHANGELOG.md#v1230-2023-01-31) + * **Feature**: Add new "Channel" APIs to enable users to manage channels used for CloudTrail Lake integrations, and "Resource Policy" APIs to enable users to manage the resource-based permissions policy attached to a channel. +* `github.com/aws/aws-sdk-go-v2/service/cloudtraildata`: [v1.0.0](service/cloudtraildata/CHANGELOG.md#v100-2023-01-31) + * **Release**: New AWS service client module + * **Feature**: Add CloudTrail Data Service to enable users to ingest activity events from non-AWS sources into CloudTrail Lake. +* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.16.0](service/codeartifact/CHANGELOG.md#v1160-2023-01-31) + * **Feature**: This release introduces a new DeletePackage API, which enables deletion of a package and all of its versions from a repository. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.83.0](service/ec2/CHANGELOG.md#v1830-2023-01-31) + * **Feature**: This launch allows customers to associate up to 8 IP addresses to their NAT Gateways to increase the limit on concurrent connections to a single destination by eight times from 55K to 440K. +* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.17.0](service/groundstation/CHANGELOG.md#v1170-2023-01-31) + * **Feature**: DigIF Expansion changes to the Customer APIs. +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.34.0](service/iot/CHANGELOG.md#v1340-2023-01-31) + * **Feature**: Added support for IoT Rules Engine Cloudwatch Logs action batch mode. +* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.14.0](service/opensearch/CHANGELOG.md#v1140-2023-01-31) + * **Feature**: Amazon OpenSearch Service adds the option for a VPC endpoint connection between two domains when the local domain uses OpenSearch version 1.3 or 2.3. You can now use remote reindex to copy indices from one VPC domain to another without a reverse proxy. +* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.24.0](service/polly/CHANGELOG.md#v1240-2023-01-31) + * **Feature**: Amazon Polly adds two new neural American English voices - Ruth, Stephen +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.67.0](service/sagemaker/CHANGELOG.md#v1670-2023-01-31) + * **Feature**: Amazon SageMaker Automatic Model Tuning now supports more completion criteria for Hyperparameter Optimization. +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.28.0](service/securityhub/CHANGELOG.md#v1280-2023-01-31) + * **Feature**: New fields have been added to the AWS Security Finding Format. Compliance.SecurityControlId is a unique identifier for a security control across standards. Compliance.AssociatedStandards contains all enabled standards in which a security control is enabled. + +# Release (2023-01-30) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.26.0](service/cloudformation/CHANGELOG.md#v1260-2023-01-30) + * **Feature**: This feature provides a method of obtaining which regions a stackset has stack instances deployed in. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.82.0](service/ec2/CHANGELOG.md#v1820-2023-01-30) + * **Feature**: We add Prefix Lists as a new route destination option for LocalGatewayRoutes. This will allow customers to create routes to Prefix Lists. Prefix List routes will allow customers to group individual CIDR routes with the same target into a single route. 
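The codeartifact entry in the 2023-01-31 release above introduces the new DeletePackage API. The snippet below is a minimal, hedged sketch of calling it from aws-sdk-go-v2; it is not part of the changelog, the domain, repository, and package names are placeholders, and the npm format is an arbitrary choice for the example.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/codeartifact"
	"github.com/aws/aws-sdk-go-v2/service/codeartifact/types"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := codeartifact.NewFromConfig(cfg)

	// Deletes the package and all of its versions from the repository.
	// All identifiers below are placeholders for this sketch.
	out, err := client.DeletePackage(ctx, &codeartifact.DeletePackageInput{
		Domain:     aws.String("example-domain"),
		Repository: aws.String("example-repo"),
		Format:     types.PackageFormatNpm,
		Package:    aws.String("example-package"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("delete response: %+v", out)
}
```

As the changelog entry notes, this differs from deleting individual versions: the single call removes the package together with all of its versions.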
+ +# Release (2023-01-27) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.20.0](service/appstream/CHANGELOG.md#v1200-2023-01-27) + * **Feature**: Fixing the issue where Appstream waiters hang for fleet_started and fleet_stopped. +* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.21.0](service/mediatailor/CHANGELOG.md#v1210-2023-01-27) + * **Feature**: This release introduces the As Run logging type, along with API and documentation updates. +* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.26.0](service/outposts/CHANGELOG.md#v1260-2023-01-27) + * **Feature**: Adding support for payment term in GetOrder, CreateOrder responses. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.66.0](service/sagemaker/CHANGELOG.md#v1660-2023-01-27) + * **Feature**: This release supports running SageMaker Training jobs with container images that are in a private Docker registry. +* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.18.0](service/sagemakerruntime/CHANGELOG.md#v1180-2023-01-27) + * **Feature**: Amazon SageMaker Runtime which supports InvokeEndpointAsync asynchronously can now invoke endpoints with custom timeout values. Asynchronous invocations support longer processing times. + +# Release (2023-01-26) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.18.0](service/eventbridge/CHANGELOG.md#v1180-2023-01-26) + * **Feature**: Minor comments for Redshift Serverless workgroup target support. + +# Release (2023-01-25) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.81.0](service/ec2/CHANGELOG.md#v1810-2023-01-25) + * **Feature**: This release adds new functionality that allows customers to provision IPv6 CIDR blocks through Amazon VPC IP Address Manager (IPAM) as well as allowing customers to utilize IPAM Resource Discovery APIs. +* `github.com/aws/aws-sdk-go-v2/service/m2`: [v1.4.0](service/m2/CHANGELOG.md#v140-2023-01-25) + * **Feature**: Add returnCode, batchJobIdentifier in GetBatchJobExecution response, for user to view the batch job execution result & unique identifier from engine. Also removed unused headers from REST APIs +* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.23.0](service/polly/CHANGELOG.md#v1230-2023-01-25) + * **Feature**: Add 5 new neural voices - Sergio (es-ES), Andres (es-MX), Remi (fr-FR), Adriano (it-IT) and Thiago (pt-BR). +* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.4.1](service/redshiftserverless/CHANGELOG.md#v141-2023-01-25) + * **Documentation**: Added query monitoring rules as possible parameters for create and update workgroup operations. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.65.0](service/sagemaker/CHANGELOG.md#v1650-2023-01-25) + * **Feature**: SageMaker Inference Recommender now decouples from Model Registry and could accept Model Name to invoke inference recommendations job; Inference Recommender now provides CPU/Memory Utilization metrics data in recommendation output. 
+* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.18.2](service/sts/CHANGELOG.md#v1182-2023-01-25) + * **Documentation**: Doc only change to update wording in a key topic + +# Release (2023-01-24) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.27.0](service/route53/CHANGELOG.md#v1270-2023-01-24) + * **Feature**: Amazon Route 53 now supports the Asia Pacific (Melbourne) Region (ap-southeast-4) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region. +* `github.com/aws/aws-sdk-go-v2/service/ssmsap`: [v1.2.0](service/ssmsap/CHANGELOG.md#v120-2023-01-24) + * **Feature**: This release provides updates to documentation and support for listing operations performed by AWS Systems Manager for SAP. + +# Release (2023-01-23) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.29.0](service/lambda/CHANGELOG.md#v1290-2023-01-23) + * **Feature**: Release Lambda RuntimeManagementConfig, enabling customers to better manage runtime updates to their Lambda functions. This release adds two new APIs, GetRuntimeManagementConfig and PutRuntimeManagementConfig, as well as support on existing Create/Get/Update function APIs. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.64.0](service/sagemaker/CHANGELOG.md#v1640-2023-01-23) + * **Feature**: Amazon SageMaker Inference now supports P4de instance types. + +# Release (2023-01-20) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.80.0](service/ec2/CHANGELOG.md#v1800-2023-01-20) + * **Feature**: C6in, M6in, M6idn, R6in and R6idn instances are powered by 3rd Generation Intel Xeon Scalable processors (code named Ice Lake) with an all-core turbo frequency of 3.5 GHz. +* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.20.0](service/ivs/CHANGELOG.md#v1200-2023-01-20) + * **Feature**: API and Doc update. Update to arns field in BatchGetStreamKey. Also updates to operations and structures. +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.30.0](service/quicksight/CHANGELOG.md#v1300-2023-01-20) + * **Feature**: This release adds support for data bars in QuickSight table and increases pivot table field well limit. + +# Release (2023-01-19) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.24.0](service/appflow/CHANGELOG.md#v1240-2023-01-19) + * **Feature**: Adding support for Salesforce Pardot connector in Amazon AppFlow. +* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.20.0](service/cloudwatchlogs/CHANGELOG.md#v1200-2023-01-19) + * **Feature**: Bug fix - Removed the regex pattern validation from CoralModel to avoid potential security issue. +* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.15.0](service/codeartifact/CHANGELOG.md#v1150-2023-01-19) + * **Feature**: Documentation updates for CodeArtifact +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.45.0](service/connect/CHANGELOG.md#v1450-2023-01-19) + * **Feature**: Amazon Connect Chat introduces Persistent Chat, allowing customers to resume previous conversations with context and transcripts carried over from previous chats, eliminating the need to repeat themselves and allowing agents to provide personalized service with access to entire conversation history. 
+* `github.com/aws/aws-sdk-go-v2/service/connectparticipant`: [v1.15.0](service/connectparticipant/CHANGELOG.md#v1150-2023-01-19) + * **Feature**: This release updates Amazon Connect Participant's GetTranscript api to provide transcripts of past chats on a persistent chat session. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.79.0](service/ec2/CHANGELOG.md#v1790-2023-01-19) + * **Feature**: Adds SSM Parameter Resource Aliasing support to EC2 Launch Templates. Launch Templates can now store parameter aliases in place of AMI Resource IDs. CreateLaunchTemplateVersion and DescribeLaunchTemplateVersions now support a convenience flag, ResolveAlias, to return the resolved parameter value. +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.40.0](service/glue/CHANGELOG.md#v1400-2023-01-19) + * **Feature**: Release Glue Studio Hudi Data Lake Format for SDK/CLI +* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.16.0](service/groundstation/CHANGELOG.md#v1160-2023-01-19) + * **Feature**: Add configurable prepass and postpass times for DataflowEndpointGroup. Add Waiter to allow customers to wait for a contact that was reserved through ReserveContact +* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.29.0](service/medialive/CHANGELOG.md#v1290-2023-01-19) + * **Feature**: AWS Elemental MediaLive adds support for SCTE 35 preRollMilliSeconds. +* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.13.0](service/opensearch/CHANGELOG.md#v1130-2023-01-19) + * **Feature**: This release adds the enhanced dry run option, that checks for validation errors that might occur when deploying configuration changes and provides a summary of these errors, if any. The feature will also indicate whether a blue/green deployment will be required to apply a change. +* `github.com/aws/aws-sdk-go-v2/service/panorama`: [v1.11.0](service/panorama/CHANGELOG.md#v1110-2023-01-19) + * **Feature**: Added AllowMajorVersionUpdate option to OTAJobConfig to make appliance software major version updates opt-in. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.63.0](service/sagemaker/CHANGELOG.md#v1630-2023-01-19) + * **Feature**: HyperParameterTuningJobs now allow passing environment variables into the corresponding TrainingJobs + +# Release (2023-01-18) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.25.0](service/cloudwatch/CHANGELOG.md#v1250-2023-01-18) + * **Feature**: Enable cross-account streams in CloudWatch Metric Streams via Observability Access Manager. +* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.19.1](service/efs/CHANGELOG.md#v1191-2023-01-18) + * **Documentation**: Documentation updates for EFS access points limit increase +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.24.2](service/wafv2/CHANGELOG.md#v1242-2023-01-18) + * **Documentation**: Improved the visibility of the guidance for updating AWS WAF resources, such as web ACLs and rule groups. + +# Release (2023-01-17) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/billingconductor`: [v1.5.0](service/billingconductor/CHANGELOG.md#v150-2023-01-17) + * **Feature**: This release adds support for SKU Scope for pricing plans. +* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.22.0](service/imagebuilder/CHANGELOG.md#v1220-2023-01-17) + * **Feature**: Add support for AWS Marketplace product IDs as input during CreateImageRecipe for the parent-image parameter. Add support for listing third-party components. 
+* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.24.0](service/networkfirewall/CHANGELOG.md#v1240-2023-01-17) + * **Feature**: Network Firewall now allows creation of dual stack endpoints, enabling inspection of IPv6 traffic. + +# Release (2023-01-13) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.44.0](service/connect/CHANGELOG.md#v1440-2023-01-13) + * **Feature**: This release updates the responses of UpdateContactFlowContent, UpdateContactFlowMetadata, UpdateContactFlowName and DeleteContactFlow API with empty responses. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.78.0](service/ec2/CHANGELOG.md#v1780-2023-01-13) + * **Feature**: Documentation updates for EC2. +* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.25.0](service/outposts/CHANGELOG.md#v1250-2023-01-13) + * **Feature**: This release adds POWER_30_KVA as an option for PowerDrawKva. PowerDrawKva is part of the RackPhysicalProperties structure in the CreateSite request. +* `github.com/aws/aws-sdk-go-v2/service/resourcegroups`: [v1.14.0](service/resourcegroups/CHANGELOG.md#v1140-2023-01-13) + * **Feature**: AWS Resource Groups customers can now turn on Group Lifecycle Events in their AWS account. When you turn this on, Resource Groups monitors your groups for changes to group state or membership. Those changes are sent to Amazon EventBridge as events that you can respond to using rules you create. + +# Release (2023-01-12) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/cleanrooms`: [v1.0.0](service/cleanrooms/CHANGELOG.md#v100-2023-01-12) + * **Release**: New AWS service client module + * **Feature**: Initial release of AWS Clean Rooms +* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.19.0](service/cloudwatchlogs/CHANGELOG.md#v1190-2023-01-12) + * **Feature**: Bug fix: logGroupName is now not a required field in GetLogEvents, FilterLogEvents, GetLogGroupFields, and DescribeLogStreams APIs as logGroupIdentifier can be provided instead +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.28.0](service/lambda/CHANGELOG.md#v1280-2023-01-12) + * **Feature**: Add support for MaximumConcurrency parameter for SQS event source. Customers can now limit the maximum concurrent invocations for their SQS Event Source Mapping. +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.30.0](service/mediaconvert/CHANGELOG.md#v1300-2023-01-12) + * **Feature**: The AWS Elemental MediaConvert SDK has added support for compact DASH manifest generation, audio normalization using TruePeak measurements, and the ability to clip the sample range in the color corrector. +* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.18.1](service/secretsmanager/CHANGELOG.md#v1181-2023-01-12) + * **Documentation**: Update documentation for new ListSecrets and DescribeSecret parameters + +# Release (2023-01-11) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.38.0](service/kendra/CHANGELOG.md#v1380-2023-01-11) + * **Feature**: This release adds support to new document types - RTF, XML, XSLT, MS_EXCEL, CSV, JSON, MD + +# Release (2023-01-10) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.21.0](service/location/CHANGELOG.md#v1210-2023-01-10) + * **Feature**: This release adds support for two new route travel models, Bicycle and Motorcycle which can be used with Grab data source. 
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.40.0](service/rds/CHANGELOG.md#v1400-2023-01-10) + * **Feature**: This release adds support for configuring allocated storage on the CreateDBInstanceReadReplica, RestoreDBInstanceFromDBSnapshot, and RestoreDBInstanceToPointInTime APIs. + +# Release (2023-01-09) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/ecrpublic`: [v1.15.0](service/ecrpublic/CHANGELOG.md#v1150-2023-01-09) + * **Feature**: This release for Amazon ECR Public makes several changes to bring the SDK into sync with the API. +* `github.com/aws/aws-sdk-go-v2/service/kendraranking`: [v1.0.0](service/kendraranking/CHANGELOG.md#v100-2023-01-09) + * **Release**: New AWS service client module + * **Feature**: Introducing Amazon Kendra Intelligent Ranking, a new set of Kendra APIs that leverages Kendra semantic ranking capabilities to improve the quality of search results from other search services (e.g., OpenSearch, Elasticsearch, Solr). +* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.23.0](service/networkfirewall/CHANGELOG.md#v1230-2023-01-09) + * **Feature**: Network Firewall now supports the Suricata rule action reject, in addition to the actions pass, drop, and alert. +* `github.com/aws/aws-sdk-go-v2/service/workspacesweb`: [v1.9.0](service/workspacesweb/CHANGELOG.md#v190-2023-01-09) + * **Feature**: This release adds support for a new portal authentication type: AWS IAM Identity Center (successor to AWS Single Sign-On). + +# Release (2023-01-06) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.21.0](service/acmpca/CHANGELOG.md#v1210-2023-01-06) + * **Feature**: Added revocation parameter validation: bucket names must match S3 bucket naming rules and CNAMEs must conform to RFC2396 restrictions on the use of special characters in URIs. +* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.23.0](service/auditmanager/CHANGELOG.md#v1230-2023-01-06) + * **Feature**: This release introduces a new data retention option in your Audit Manager settings. You can now use the DeregistrationPolicy parameter to specify if you want to delete your data when you deregister Audit Manager. + +# Release (2023-01-05) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.19.0](service/accessanalyzer/CHANGELOG.md#v1190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/account`: [v1.8.0](service/account/CHANGELOG.md#v180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/acm`: [v1.17.0](service/acm/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.20.0](service/acmpca/CHANGELOG.md#v1200-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/alexaforbusiness`: [v1.15.0](service/alexaforbusiness/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/amp`: [v1.16.0](service/amp/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.13.0](service/amplify/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/amplifybackend`: [v1.14.0](service/amplifybackend/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). + * **Feature**: Updated GetBackendAPIModels response to include ModelIntrospectionSchema json string +* `github.com/aws/aws-sdk-go-v2/service/amplifyuibuilder`: [v1.9.0](service/amplifyuibuilder/CHANGELOG.md#v190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.16.0](service/apigateway/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/apigatewaymanagementapi`: [v1.11.0](service/apigatewaymanagementapi/CHANGELOG.md#v1110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/apigatewayv2`: [v1.13.0](service/apigatewayv2/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/appconfig`: [v1.15.0](service/appconfig/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/appconfigdata`: [v1.5.0](service/appconfigdata/CHANGELOG.md#v150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.23.0](service/appflow/CHANGELOG.md#v1230-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/appintegrations`: [v1.14.0](service/appintegrations/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.17.0](service/applicationautoscaling/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/applicationcostprofiler`: [v1.10.0](service/applicationcostprofiler/CHANGELOG.md#v1100-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/applicationdiscoveryservice`: [v1.15.0](service/applicationdiscoveryservice/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/applicationinsights`: [v1.17.0](service/applicationinsights/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.17.0](service/appmesh/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.16.0](service/apprunner/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+ * **Feature**: This release adds support for securely referencing secrets and configuration data that are stored in Secrets Manager and SSM Parameter Store by adding them as environment secrets in your App Runner service. +* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.19.0](service/appstream/CHANGELOG.md#v1190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.18.0](service/appsync/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/arczonalshift`: [v1.1.0](service/arczonalshift/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.22.0](service/athena/CHANGELOG.md#v1220-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.22.0](service/auditmanager/CHANGELOG.md#v1220-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.26.0](service/autoscaling/CHANGELOG.md#v1260-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/autoscalingplans`: [v1.13.0](service/autoscalingplans/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.19.0](service/backup/CHANGELOG.md#v1190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/backupgateway`: [v1.9.0](service/backupgateway/CHANGELOG.md#v190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/backupstorage`: [v1.1.0](service/backupstorage/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.21.0](service/batch/CHANGELOG.md#v1210-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/billingconductor`: [v1.4.0](service/billingconductor/CHANGELOG.md#v140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.17.0](service/braket/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/budgets`: [v1.14.0](service/budgets/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.22.0](service/chime/CHANGELOG.md#v1220-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/chimesdkidentity`: [v1.10.0](service/chimesdkidentity/CHANGELOG.md#v1100-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines`: [v1.2.0](service/chimesdkmediapipelines/CHANGELOG.md#v120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.14.0](service/chimesdkmeetings/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.12.0](service/chimesdkmessaging/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/chimesdkvoice`: [v1.1.0](service/chimesdkvoice/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/cloud9`: [v1.17.0](service/cloud9/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/cloudcontrol`: [v1.11.0](service/cloudcontrol/CHANGELOG.md#v1110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/clouddirectory`: [v1.13.0](service/clouddirectory/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.25.0](service/cloudformation/CHANGELOG.md#v1250-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.24.0](service/cloudfront/CHANGELOG.md#v1240-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/cloudhsm`: [v1.13.0](service/cloudhsm/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/cloudhsmv2`: [v1.14.0](service/cloudhsmv2/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/cloudsearch`: [v1.14.0](service/cloudsearch/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/cloudsearchdomain`: [v1.12.0](service/cloudsearchdomain/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.22.0](service/cloudtrail/CHANGELOG.md#v1220-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.24.0](service/cloudwatch/CHANGELOG.md#v1240-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/cloudwatchevents`: [v1.15.0](service/cloudwatchevents/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.18.0](service/cloudwatchlogs/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.14.0](service/codeartifact/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.20.0](service/codebuild/CHANGELOG.md#v1200-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/codecatalyst`: [v1.1.0](service/codecatalyst/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/codecommit`: [v1.14.0](service/codecommit/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/codedeploy`: [v1.16.0](service/codedeploy/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/codeguruprofiler`: [v1.13.0](service/codeguruprofiler/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/codegurureviewer`: [v1.17.0](service/codegurureviewer/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/codepipeline`: [v1.14.0](service/codepipeline/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/codestar`: [v1.13.0](service/codestar/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/codestarconnections`: [v1.14.0](service/codestarconnections/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/codestarnotifications`: [v1.14.0](service/codestarnotifications/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/cognitoidentity`: [v1.15.0](service/cognitoidentity/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.22.0](service/cognitoidentityprovider/CHANGELOG.md#v1220-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/cognitosync`: [v1.12.0](service/cognitosync/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.21.0](service/comprehend/CHANGELOG.md#v1210-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/comprehendmedical`: [v1.15.0](service/comprehendmedical/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.20.0](service/computeoptimizer/CHANGELOG.md#v1200-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.29.0](service/configservice/CHANGELOG.md#v1290-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.43.0](service/connect/CHANGELOG.md#v1430-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). + * **Feature**: Documentation update for a new Initiation Method value in DescribeContact API +* `github.com/aws/aws-sdk-go-v2/service/connectcampaigns`: [v1.2.0](service/connectcampaigns/CHANGELOG.md#v120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/connectcases`: [v1.2.0](service/connectcases/CHANGELOG.md#v120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/connectcontactlens`: [v1.13.0](service/connectcontactlens/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/connectparticipant`: [v1.14.0](service/connectparticipant/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/controltower`: [v1.1.0](service/controltower/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/costandusagereportservice`: [v1.15.0](service/costandusagereportservice/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.25.0](service/costexplorer/CHANGELOG.md#v1250-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.22.0](service/customerprofiles/CHANGELOG.md#v1220-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.23.0](service/databasemigrationservice/CHANGELOG.md#v1230-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.21.0](service/databrew/CHANGELOG.md#v1210-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.18.0](service/dataexchange/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/datapipeline`: [v1.14.0](service/datapipeline/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.21.0](service/datasync/CHANGELOG.md#v1210-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/dax`: [v1.12.0](service/dax/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/detective`: [v1.18.0](service/detective/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/devicefarm`: [v1.15.0](service/devicefarm/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.21.0](service/devopsguru/CHANGELOG.md#v1210-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.18.0](service/directconnect/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/directoryservice`: [v1.16.0](service/directoryservice/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/dlm`: [v1.14.0](service/dlm/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.20.0](service/docdb/CHANGELOG.md#v1200-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/docdbelastic`: [v1.1.0](service/docdbelastic/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.10.0](service/drs/CHANGELOG.md#v1100-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.18.0](service/dynamodb/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/dynamodbstreams`: [v1.14.0](service/dynamodbstreams/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ebs`: [v1.16.0](service/ebs/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ec2instanceconnect`: [v1.15.0](service/ec2instanceconnect/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.18.0](service/ecr/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ecrpublic`: [v1.14.0](service/ecrpublic/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.23.0](service/ecs/CHANGELOG.md#v1230-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.19.0](service/efs/CHANGELOG.md#v1190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.27.0](service/eks/CHANGELOG.md#v1270-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.26.0](service/elasticache/CHANGELOG.md#v1260-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk`: [v1.15.0](service/elasticbeanstalk/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/elasticinference`: [v1.12.0](service/elasticinference/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing`: [v1.15.0](service/elasticloadbalancing/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.19.0](service/elasticloadbalancingv2/CHANGELOG.md#v1190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.18.0](service/elasticsearchservice/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/elastictranscoder`: [v1.14.0](service/elastictranscoder/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.22.0](service/emr/CHANGELOG.md#v1220-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.16.0](service/emrcontainers/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/emrserverless`: [v1.5.0](service/emrserverless/CHANGELOG.md#v150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). + * **Feature**: Adds support for customized images. You can now provide runtime images when creating or updating EMR Serverless Applications. +* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.17.0](service/eventbridge/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.10.0](service/evidently/CHANGELOG.md#v1100-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/finspace`: [v1.9.0](service/finspace/CHANGELOG.md#v190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.14.0](service/finspacedata/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/firehose`: [v1.16.0](service/firehose/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/fis`: [v1.14.0](service/fis/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.22.0](service/fms/CHANGELOG.md#v1220-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.24.0](service/forecast/CHANGELOG.md#v1240-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/forecastquery`: [v1.13.0](service/forecastquery/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.21.0](service/frauddetector/CHANGELOG.md#v1210-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.28.0](service/fsx/CHANGELOG.md#v1280-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.17.0](service/gamelift/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/gamesparks`: [v1.2.0](service/gamesparks/CHANGELOG.md#v120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/glacier`: [v1.14.0](service/glacier/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/globalaccelerator`: [v1.16.0](service/globalaccelerator/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.39.0](service/glue/CHANGELOG.md#v1390-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.11.0](service/grafana/CHANGELOG.md#v1110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/greengrass`: [v1.15.0](service/greengrass/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.21.0](service/greengrassv2/CHANGELOG.md#v1210-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.15.0](service/groundstation/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.17.0](service/guardduty/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/health`: [v1.16.0](service/health/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/healthlake`: [v1.15.0](service/healthlake/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/honeycode`: [v1.13.0](service/honeycode/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.19.0](service/iam/CHANGELOG.md#v1190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.16.0](service/identitystore/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.21.0](service/imagebuilder/CHANGELOG.md#v1210-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/inspector`: [v1.13.0](service/inspector/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.11.0](service/inspector2/CHANGELOG.md#v1110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.33.0](service/iot/CHANGELOG.md#v1330-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iot1clickdevicesservice`: [v1.11.0](service/iot1clickdevicesservice/CHANGELOG.md#v1110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iot1clickprojects`: [v1.12.0](service/iot1clickprojects/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iotanalytics`: [v1.14.0](service/iotanalytics/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iotdataplane`: [v1.14.0](service/iotdataplane/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iotdeviceadvisor`: [v1.17.0](service/iotdeviceadvisor/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iotevents`: [v1.15.0](service/iotevents/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ioteventsdata`: [v1.13.0](service/ioteventsdata/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iotfleethub`: [v1.13.0](service/iotfleethub/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/iotfleetwise`: [v1.3.0](service/iotfleetwise/CHANGELOG.md#v130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iotjobsdataplane`: [v1.12.0](service/iotjobsdataplane/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iotroborunner`: [v1.1.0](service/iotroborunner/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iotsecuretunneling`: [v1.15.0](service/iotsecuretunneling/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.27.0](service/iotsitewise/CHANGELOG.md#v1270-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iotthingsgraph`: [v1.14.0](service/iotthingsgraph/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iottwinmaker`: [v1.10.0](service/iottwinmaker/CHANGELOG.md#v1100-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.24.0](service/iotwireless/CHANGELOG.md#v1240-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.19.0](service/ivs/CHANGELOG.md#v1190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.3.0](service/ivschat/CHANGELOG.md#v130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.19.0](service/kafka/CHANGELOG.md#v1190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/kafkaconnect`: [v1.9.0](service/kafkaconnect/CHANGELOG.md#v190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.37.0](service/kendra/CHANGELOG.md#v1370-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/keyspaces`: [v1.1.0](service/keyspaces/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.17.0](service/kinesis/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/kinesisanalytics`: [v1.14.0](service/kinesisanalytics/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2`: [v1.16.0](service/kinesisanalyticsv2/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/kinesisvideo`: [v1.15.0](service/kinesisvideo/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideoarchivedmedia`: [v1.14.0](service/kinesisvideoarchivedmedia/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideomedia`: [v1.11.0](service/kinesisvideomedia/CHANGELOG.md#v1110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideosignaling`: [v1.11.0](service/kinesisvideosignaling/CHANGELOG.md#v1110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideowebrtcstorage`: [v1.2.0](service/kinesisvideowebrtcstorage/CHANGELOG.md#v120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.20.0](service/kms/CHANGELOG.md#v1200-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.19.0](service/lakeformation/CHANGELOG.md#v1190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.27.0](service/lambda/CHANGELOG.md#v1270-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/lexmodelbuildingservice`: [v1.17.0](service/lexmodelbuildingservice/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.27.0](service/lexmodelsv2/CHANGELOG.md#v1270-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/lexruntimeservice`: [v1.13.0](service/lexruntimeservice/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.16.0](service/lexruntimev2/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.17.0](service/licensemanager/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/licensemanagerlinuxsubscriptions`: [v1.1.0](service/licensemanagerlinuxsubscriptions/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/licensemanagerusersubscriptions`: [v1.2.0](service/licensemanagerusersubscriptions/CHANGELOG.md#v120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.25.0](service/lightsail/CHANGELOG.md#v1250-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). + * **Documentation**: Documentation updates for Amazon Lightsail. 
+* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.20.0](service/location/CHANGELOG.md#v1200-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/lookoutequipment`: [v1.17.0](service/lookoutequipment/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.19.0](service/lookoutmetrics/CHANGELOG.md#v1190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/lookoutvision`: [v1.15.0](service/lookoutvision/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/m2`: [v1.3.0](service/m2/CHANGELOG.md#v130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/machinelearning`: [v1.15.0](service/machinelearning/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/macie`: [v1.15.0](service/macie/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.26.0](service/macie2/CHANGELOG.md#v1260-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/managedblockchain`: [v1.14.0](service/managedblockchain/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/marketplacecatalog`: [v1.15.0](service/marketplacecatalog/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/marketplacecommerceanalytics`: [v1.12.0](service/marketplacecommerceanalytics/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/marketplaceentitlementservice`: [v1.12.0](service/marketplaceentitlementservice/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/marketplacemetering`: [v1.14.0](service/marketplacemetering/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/mediaconnect`: [v1.18.0](service/mediaconnect/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.29.0](service/mediaconvert/CHANGELOG.md#v1290-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.28.0](service/medialive/CHANGELOG.md#v1280-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.20.0](service/mediapackage/CHANGELOG.md#v1200-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.21.0](service/mediapackagevod/CHANGELOG.md#v1210-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/mediastore`: [v1.13.0](service/mediastore/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/mediastoredata`: [v1.13.0](service/mediastoredata/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.20.0](service/mediatailor/CHANGELOG.md#v1200-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/memorydb`: [v1.12.0](service/memorydb/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.17.0](service/mgn/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/migrationhub`: [v1.13.0](service/migrationhub/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/migrationhubconfig`: [v1.13.0](service/migrationhubconfig/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/migrationhuborchestrator`: [v1.1.0](service/migrationhuborchestrator/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/migrationhubrefactorspaces`: [v1.8.0](service/migrationhubrefactorspaces/CHANGELOG.md#v180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/migrationhubstrategy`: [v1.7.0](service/migrationhubstrategy/CHANGELOG.md#v170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/mobile`: [v1.12.0](service/mobile/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/mq`: [v1.14.0](service/mq/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/mturk`: [v1.14.0](service/mturk/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/mwaa`: [v1.14.0](service/mwaa/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). + * **Documentation**: MWAA supports Apache Airflow version 2.4.3. 
+* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.19.0](service/neptune/CHANGELOG.md#v1190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.22.0](service/networkfirewall/CHANGELOG.md#v1220-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.17.0](service/networkmanager/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.16.0](service/nimble/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/oam`: [v1.1.0](service/oam/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/omics`: [v1.1.0](service/omics/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.12.0](service/opensearch/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/opensearchserverless`: [v1.1.0](service/opensearchserverless/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/opsworks`: [v1.14.0](service/opsworks/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/opsworkscm`: [v1.15.0](service/opsworkscm/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/organizations`: [v1.18.0](service/organizations/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.24.0](service/outposts/CHANGELOG.md#v1240-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/panorama`: [v1.10.0](service/panorama/CHANGELOG.md#v1100-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.23.0](service/personalize/CHANGELOG.md#v1230-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/personalizeevents`: [v1.13.0](service/personalizeevents/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/personalizeruntime`: [v1.13.0](service/personalizeruntime/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.16.0](service/pi/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.18.0](service/pinpoint/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/pinpointemail`: [v1.12.0](service/pinpointemail/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoice`: [v1.11.0](service/pinpointsmsvoice/CHANGELOG.md#v1110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoicev2`: [v1.1.0](service/pinpointsmsvoicev2/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/pipes`: [v1.1.0](service/pipes/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.22.0](service/polly/CHANGELOG.md#v1220-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/pricing`: [v1.18.0](service/pricing/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/privatenetworks`: [v1.1.0](service/privatenetworks/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.19.0](service/proton/CHANGELOG.md#v1190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/qldb`: [v1.15.0](service/qldb/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/qldbsession`: [v1.14.0](service/qldbsession/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.29.0](service/quicksight/CHANGELOG.md#v1290-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ram`: [v1.17.0](service/ram/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/rbin`: [v1.8.0](service/rbin/CHANGELOG.md#v180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.39.0](service/rds/CHANGELOG.md#v1390-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). + * **Feature**: This release adds support for specifying which certificate authority (CA) to use for a DB instance's server certificate during DB instance creation, as well as other CA enhancements. +* `github.com/aws/aws-sdk-go-v2/service/rdsdata`: [v1.13.0](service/rdsdata/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.27.0](service/redshift/CHANGELOG.md#v1270-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/redshiftdata`: [v1.18.0](service/redshiftdata/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.4.0](service/redshiftserverless/CHANGELOG.md#v140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.23.0](service/rekognition/CHANGELOG.md#v1230-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.8.0](service/resiliencehub/CHANGELOG.md#v180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/resourceexplorer2`: [v1.2.0](service/resourceexplorer2/CHANGELOG.md#v120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/resourcegroups`: [v1.13.0](service/resourcegroups/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi`: [v1.14.0](service/resourcegroupstaggingapi/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/robomaker`: [v1.18.0](service/robomaker/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/rolesanywhere`: [v1.1.0](service/rolesanywhere/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.26.0](service/route53/CHANGELOG.md#v1260-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/route53domains`: [v1.14.0](service/route53domains/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/route53recoverycluster`: [v1.11.0](service/route53recoverycluster/CHANGELOG.md#v1110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig`: [v1.11.0](service/route53recoverycontrolconfig/CHANGELOG.md#v1110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/route53recoveryreadiness`: [v1.9.0](service/route53recoveryreadiness/CHANGELOG.md#v190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/route53resolver`: [v1.16.0](service/route53resolver/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/rum`: [v1.9.0](service/rum/CHANGELOG.md#v190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.30.0](service/s3/CHANGELOG.md#v1300-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.29.0](service/s3control/CHANGELOG.md#v1290-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/s3outposts`: [v1.14.0](service/s3outposts/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.62.0](service/sagemaker/CHANGELOG.md#v1620-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/sagemakera2iruntime`: [v1.15.0](service/sagemakera2iruntime/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/sagemakeredge`: [v1.13.0](service/sagemakeredge/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/sagemakerfeaturestoreruntime`: [v1.13.0](service/sagemakerfeaturestoreruntime/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/sagemakergeospatial`: [v1.1.0](service/sagemakergeospatial/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.17.0](service/sagemakerruntime/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/savingsplans`: [v1.12.0](service/savingsplans/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/scheduler`: [v1.1.0](service/scheduler/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/schemas`: [v1.15.0](service/schemas/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.18.0](service/secretsmanager/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.27.0](service/securityhub/CHANGELOG.md#v1270-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/securitylake`: [v1.2.0](service/securitylake/CHANGELOG.md#v120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository`: [v1.12.0](service/serverlessapplicationrepository/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.16.0](service/servicecatalog/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.16.0](service/servicecatalogappregistry/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.19.0](service/servicediscovery/CHANGELOG.md#v1190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/servicequotas`: [v1.14.0](service/servicequotas/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ses`: [v1.15.0](service/ses/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/sesv2`: [v1.16.0](service/sesv2/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/sfn`: [v1.17.0](service/sfn/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/shield`: [v1.18.0](service/shield/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/signer`: [v1.14.0](service/signer/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/simspaceweaver`: [v1.1.0](service/simspaceweaver/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/sms`: [v1.13.0](service/sms/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/snowball`: [v1.17.0](service/snowball/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/snowdevicemanagement`: [v1.9.0](service/snowdevicemanagement/CHANGELOG.md#v190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.19.0](service/sns/CHANGELOG.md#v1190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.20.0](service/sqs/CHANGELOG.md#v1200-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.35.0](service/ssm/CHANGELOG.md#v1350-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ssmcontacts`: [v1.14.0](service/ssmcontacts/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.20.0](service/ssmincidents/CHANGELOG.md#v1200-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ssmsap`: [v1.1.0](service/ssmsap/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/sso`: [v1.12.0](service/sso/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ssoadmin`: [v1.16.0](service/ssoadmin/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.14.0](service/ssooidc/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.18.0](service/storagegateway/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.18.0](service/sts/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/support`: [v1.14.0](service/support/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/supportapp`: [v1.2.0](service/supportapp/CHANGELOG.md#v120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/swf`: [v1.14.0](service/swf/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.17.0](service/synthetics/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.20.0](service/textract/CHANGELOG.md#v1200-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/timestreamquery`: [v1.15.0](service/timestreamquery/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/timestreamwrite`: [v1.15.0](service/timestreamwrite/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.25.0](service/transcribe/CHANGELOG.md#v1250-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.9.0](service/transcribestreaming/CHANGELOG.md#v190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.28.0](service/transfer/CHANGELOG.md#v1280-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/translate`: [v1.17.0](service/translate/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/voiceid`: [v1.12.0](service/voiceid/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/waf`: [v1.12.0](service/waf/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/wafregional`: [v1.13.0](service/wafregional/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.24.0](service/wafv2/CHANGELOG.md#v1240-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.18.0](service/wellarchitected/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.12.0](service/wisdom/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/workdocs`: [v1.13.0](service/workdocs/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/worklink`: [v1.13.0](service/worklink/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/workmail`: [v1.18.0](service/workmail/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/workmailmessageflow`: [v1.12.0](service/workmailmessageflow/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.28.0](service/workspaces/CHANGELOG.md#v1280-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/workspacesweb`: [v1.8.0](service/workspacesweb/CHANGELOG.md#v180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/xray`: [v1.16.0](service/xray/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). + +# Release (2023-01-04) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.16.0](service/applicationautoscaling/CHANGELOG.md#v1160-2023-01-04) + * **Feature**: Customers can now use the existing DescribeScalingActivities API to also see the detailed and machine-readable reasons for Application Auto Scaling not scaling their resources and, if needed, take the necessary corrective actions. +* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.17.4](service/cloudwatchlogs/CHANGELOG.md#v1174-2023-01-04) + * **Documentation**: Update to remove sequenceToken as a required field in PutLogEvents calls. 
+* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.34.0](service/ssm/CHANGELOG.md#v1340-2023-01-04) + * **Feature**: Adding support for QuickSetup Document Type in Systems Manager + +# Release (2023-01-03) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/securitylake`: [v1.1.0](service/securitylake/CHANGELOG.md#v110-2023-01-03) + * **Feature**: Allow the CreateSubscriber API to take string input for setting a more descriptive SubscriberDescription field. Make the sourceTypes field required at the model level for UpdateSubscriberRequest, as it is required for every API call on the backend. Allow ListSubscribers to take any string as the nextToken param. + +# Release (2022-12-30) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.23.0](service/cloudfront/CHANGELOG.md#v1230-2022-12-30) + * **Feature**: Extend response headers policy to support removing headers from viewer responses +* `github.com/aws/aws-sdk-go-v2/service/iotfleetwise`: [v1.2.1](service/iotfleetwise/CHANGELOG.md#v121-2022-12-30) + * **Documentation**: Update documentation - correct the epoch constant default value for the expiryTime field in the CreateCampaign request. + +# Release (2022-12-29) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.15.28](service/apigateway/CHANGELOG.md#v11528-2022-12-29) + * **Documentation**: Documentation updates for Amazon API Gateway +* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.21.0](service/emr/CHANGELOG.md#v1210-2022-12-29) + * **Feature**: Added GetClusterSessionCredentials API to allow Amazon SageMaker Studio to connect to EMR on EC2 clusters with runtime roles and AWS Lake Formation-based access control for Apache Spark, Apache Hive, and Presto queries. +* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.17.0](service/secretsmanager/CHANGELOG.md#v1170-2022-12-29) + * **Feature**: Added owning service filter, include planned deletion flag, and next rotation date response parameter in ListSecrets. +* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.11.0](service/wisdom/CHANGELOG.md#v1110-2022-12-29) + * **Feature**: This release extends Wisdom CreateContent and StartContentUpload APIs to support PDF and Microsoft Word docx document uploading. + +# Release (2022-12-28) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.25.0](service/elasticache/CHANGELOG.md#v1250-2022-12-28) + * **Feature**: This release allows you to modify the encryption in transit setting for existing Redis clusters. You can now change the TLS configuration of your Redis clusters without the need to re-build or re-provision the clusters or impact application availability. +* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.21.0](service/networkfirewall/CHANGELOG.md#v1210-2022-12-28) + * **Feature**: AWS Network Firewall now provides status messages for firewalls to help you troubleshoot when your endpoint fails. +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.38.0](service/rds/CHANGELOG.md#v1380-2022-12-28) + * **Feature**: This release adds support for Custom Engine Version (CEV) on RDS Custom SQL Server. +* `github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig`: [v1.10.0](service/route53recoverycontrolconfig/CHANGELOG.md#v1100-2022-12-28) + * **Feature**: Added support for Python paginators in the route53-recovery-control-config List* APIs.
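The 2023-01-05 entries above add an `ErrorCodeOverride` field to every generated error struct (aws/smithy-go#401). Below is a minimal sketch of how a caller might read that field when handling a service error; the S3 client, the `types.NoSuchKey` error type, and the bucket/key names are illustrative assumptions rather than anything this changelog prescribes.

```go
package main

import (
	"context"
	"errors"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
	"github.com/aws/smithy-go"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	_, err = client.GetObject(context.TODO(), &s3.GetObjectInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket name
		Key:    aws.String("missing-key"),    // hypothetical object key
	})
	if err != nil {
		// Generated error structs now carry an ErrorCodeOverride pointer; when
		// the wire-level code differs from the modeled one, the override is
		// expected to hold the code that actually came back from the service.
		var nsk *types.NoSuchKey
		if errors.As(err, &nsk) && nsk.ErrorCodeOverride != nil {
			log.Printf("override code: %s", *nsk.ErrorCodeOverride)
		}

		// smithy.APIError remains the generic way to read the error code.
		var apiErr smithy.APIError
		if errors.As(err, &apiErr) {
			log.Printf("code: %s, message: %s", apiErr.ErrorCode(), apiErr.ErrorMessage())
		}
	}
}
```

When the override is not set, the modeled error code still applies, so existing `smithy.APIError`-based handling should be unaffected.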
+ +# Release (2022-12-27) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/memorydb`: [v1.11.0](service/memorydb/CHANGELOG.md#v1110-2022-12-27) + * **Feature**: This release adds support for MemoryDB Reserved nodes which provides a significant discount compared to on-demand node pricing. Reserved nodes are not physical nodes, but rather a billing discount applied to the use of on-demand nodes in your account. +* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.27.0](service/transfer/CHANGELOG.md#v1270-2022-12-27) + * **Feature**: Add additional operations to throw ThrottlingExceptions + +# Release (2022-12-23) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.42.0](service/connect/CHANGELOG.md#v1420-2022-12-23) + * **Feature**: Support for Routing Profile filter, SortCriteria, and grouping by Routing Profiles for GetCurrentMetricData API. Support for RoutingProfiles, UserHierarchyGroups, and Agents as filters, NextStatus and AgentStatusName for GetCurrentUserData. Adds ApproximateTotalCount to both APIs. +* `github.com/aws/aws-sdk-go-v2/service/connectparticipant`: [v1.13.0](service/connectparticipant/CHANGELOG.md#v1130-2022-12-23) + * **Feature**: Amazon Connect Chat introduces the Message Receipts feature. This feature allows agents and customers to receive message delivered and read receipts after they send a chat message. +* `github.com/aws/aws-sdk-go-v2/service/detective`: [v1.17.0](service/detective/CHANGELOG.md#v1170-2022-12-23) + * **Feature**: This release adds a missed AccessDeniedException type to several endpoints. +* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.27.0](service/fsx/CHANGELOG.md#v1270-2022-12-23) + * **Feature**: Fix a bug where a recent release might break certain existing SDKs. +* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.10.0](service/inspector2/CHANGELOG.md#v1100-2022-12-23) + * **Feature**: Amazon Inspector adds support for scanning NodeJS 18.x and Go 1.x AWS Lambda function runtimes. + +# Release (2022-12-22) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.19.0](service/computeoptimizer/CHANGELOG.md#v1190-2022-12-22) + * **Feature**: This release enables AWS Compute Optimizer to analyze and generate optimization recommendations for ecs services running on Fargate. +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.41.0](service/connect/CHANGELOG.md#v1410-2022-12-22) + * **Feature**: Amazon Connect Chat introduces the Idle Participant/Autodisconnect feature, which allows users to set timeouts relating to the activity of chat participants, using the new UpdateParticipantRoleConfig API. +* `github.com/aws/aws-sdk-go-v2/service/iotdeviceadvisor`: [v1.16.0](service/iotdeviceadvisor/CHANGELOG.md#v1160-2022-12-22) + * **Feature**: This release adds the following new features: 1) Documentation updates for IoT Device Advisor APIs. 2) Updated required request parameters for IoT Device Advisor APIs. 3) Added new service feature: ability to provide the test endpoint when customer executing the StartSuiteRun API. +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideowebrtcstorage`: [v1.1.0](service/kinesisvideowebrtcstorage/CHANGELOG.md#v110-2022-12-22) + * **Feature**: Amazon Kinesis Video Streams offers capabilities to stream video and audio in real-time via WebRTC to the cloud for storage, playback, and analytical processing. Customers can use our enhanced WebRTC SDK and cloud APIs to enable real-time streaming, as well as media ingestion to the cloud. 
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.37.0](service/rds/CHANGELOG.md#v1370-2022-12-22) + * **Feature**: Add support for managing the master user password in AWS Secrets Manager for the DBInstance and DBCluster. +* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.16.11](service/secretsmanager/CHANGELOG.md#v11611-2022-12-22) + * **Documentation**: Documentation updates for Secrets Manager + +# Release (2022-12-21) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/licensemanagerlinuxsubscriptions`: [v1.0.0](service/licensemanagerlinuxsubscriptions/CHANGELOG.md#v100-2022-12-21) + * **Release**: New AWS service client module + * **Feature**: AWS License Manager now offers cross-region, cross-account tracking of commercial Linux subscriptions on AWS. This includes subscriptions purchased as part of EC2 subscription-included AMIs, on the AWS Marketplace, or brought to AWS via the Red Hat Cloud Access Program. +* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.25.0](service/macie2/CHANGELOG.md#v1250-2022-12-21) + * **Feature**: This release adds support for analyzing Amazon S3 objects that use the S3 Glacier Instant Retrieval (Glacier_IR) storage class. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.61.0](service/sagemaker/CHANGELOG.md#v1610-2022-12-21) + * **Feature**: This release enables adding RStudio Workbench support to an existing Amazon SageMaker Studio domain. It allows setting your RStudio on SageMaker environment configuration parameters and also updating the RStudioConnectUrl and RStudioPackageManagerUrl parameters for existing domains. +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.33.4](service/ssm/CHANGELOG.md#v1334-2022-12-21) + * **Documentation**: Doc-only updates for December 2022. +* `github.com/aws/aws-sdk-go-v2/service/support`: [v1.13.22](service/support/CHANGELOG.md#v11322-2022-12-21) + * **Documentation**: Documentation updates for the AWS Support API +* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.26.0](service/transfer/CHANGELOG.md#v1260-2022-12-21) + * **Feature**: This release adds support for Decrypt as a workflow step type. + +# Release (2022-12-20) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.20.0](service/batch/CHANGELOG.md#v1200-2022-12-20) + * **Feature**: Adds isCancelled and isTerminated to DescribeJobs response. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.77.0](service/ec2/CHANGELOG.md#v1770-2022-12-20) + * **Feature**: Adds support for pagination in the EC2 DescribeImages API (see the paginator sketch after the 2022-12-19 release notes below). +* `github.com/aws/aws-sdk-go-v2/service/lookoutequipment`: [v1.16.0](service/lookoutequipment/CHANGELOG.md#v1160-2022-12-20) + * **Feature**: This release adds support for listing inference schedulers by status. +* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.27.0](service/medialive/CHANGELOG.md#v1270-2022-12-20) + * **Feature**: This release adds support for two new features to AWS Elemental MediaLive. First, you can now burn in timecodes to your MediaLive outputs. Second, we now support the ability to decode Dolby E audio when it comes in on an input. +* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.15.0](service/nimble/CHANGELOG.md#v1150-2022-12-20) + * **Feature**: Amazon Nimble Studio now supports configuring session storage volumes and persistence, as well as backup and restore sessions through launch profiles.
+* `github.com/aws/aws-sdk-go-v2/service/resourceexplorer2`: [v1.1.0](service/resourceexplorer2/CHANGELOG.md#v110-2022-12-20) + * **Feature**: Documentation updates for AWS Resource Explorer. +* `github.com/aws/aws-sdk-go-v2/service/route53domains`: [v1.13.0](service/route53domains/CHANGELOG.md#v1130-2022-12-20) + * **Feature**: Use Route 53 domain APIs to change owner, create/delete DS record, modify IPS tag, resend authorization. New: AssociateDelegationSignerToDomain, DisassociateDelegationSignerFromDomain, PushDomain, ResendOperationAuthorization. Updated: UpdateDomainContact, ListOperations, CheckDomainTransferability. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.60.0](service/sagemaker/CHANGELOG.md#v1600-2022-12-20) + * **Feature**: Amazon SageMaker Autopilot adds support for new objective metrics in CreateAutoMLJob API. +* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.24.0](service/transcribe/CHANGELOG.md#v1240-2022-12-20) + * **Feature**: Enable our batch transcription jobs for Swedish and Vietnamese. + +# Release (2022-12-19) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.21.0](service/athena/CHANGELOG.md#v1210-2022-12-19) + * **Feature**: Add missed InvalidRequestException in GetCalculationExecutionCode,StopCalculationExecution APIs. Correct required parameters (Payload and Type) in UpdateNotebook API. Change Notebook size from 15 Mb to 10 Mb. +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.22.0](service/ecs/CHANGELOG.md#v1220-2022-12-19) + * **Feature**: This release adds support for alarm-based rollbacks in ECS, a new feature that allows customers to add automated safeguards for Amazon ECS service rolling updates. +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideo`: [v1.14.0](service/kinesisvideo/CHANGELOG.md#v1140-2022-12-19) + * **Feature**: Amazon Kinesis Video Streams offers capabilities to stream video and audio in real-time via WebRTC to the cloud for storage, playback, and analytical processing. Customers can use our enhanced WebRTC SDK and cloud APIs to enable real-time streaming, as well as media ingestion to the cloud. +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideowebrtcstorage`: [v1.0.0](service/kinesisvideowebrtcstorage/CHANGELOG.md#v100-2022-12-19) + * **Release**: New AWS service client module + * **Feature**: Amazon Kinesis Video Streams offers capabilities to stream video and audio in real-time via WebRTC to the cloud for storage, playback, and analytical processing. Customers can use our enhanced WebRTC SDK and cloud APIs to enable real-time streaming, as well as media ingestion to the cloud. +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.36.0](service/rds/CHANGELOG.md#v1360-2022-12-19) + * **Feature**: Add support for --enable-customer-owned-ip to RDS create-db-instance-read-replica API for RDS on Outposts. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.59.0](service/sagemaker/CHANGELOG.md#v1590-2022-12-19) + * **Feature**: AWS Sagemaker - Sagemaker Images now supports Aliases as secondary identifiers for ImageVersions. SageMaker Images now supports additional metadata for ImageVersions for better images management. 
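The `service/ec2` v1.77.0 entry in the 2022-12-20 release above adds pagination to the `DescribeImages` API. A minimal sketch of walking the pages follows, assuming the `NewDescribeImagesPaginator` helper that the SDK generates for paginated operations; the `Owners` filter is only an illustrative choice.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := ec2.NewFromConfig(cfg)

	// With pagination modeled on DescribeImages, the generated paginator
	// keeps fetching pages until the service stops returning a NextToken.
	paginator := ec2.NewDescribeImagesPaginator(client, &ec2.DescribeImagesInput{
		Owners: []string{"self"}, // illustrative filter: only images owned by this account
	})
	for paginator.HasMorePages() {
		page, err := paginator.NextPage(context.TODO())
		if err != nil {
			log.Fatal(err)
		}
		for _, image := range page.Images {
			fmt.Println(aws.ToString(image.ImageId))
		}
	}
}
```

Each page is a regular `DescribeImagesOutput`, so per-image handling written against the unpaginated call should carry over unchanged.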
+ +# Release (2022-12-16) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.22.0](service/appflow/CHANGELOG.md#v1220-2022-12-16) + * **Feature**: This release updates the ListConnectorEntities API action so that it returns paginated responses that customers can retrieve with next tokens. +* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.22.2](service/cloudfront/CHANGELOG.md#v1222-2022-12-16) + * **Documentation**: Updated documentation for CloudFront +* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.20.0](service/datasync/CHANGELOG.md#v1200-2022-12-16) + * **Feature**: AWS DataSync now supports the use of tags with task executions. With this new feature, you can apply tags each time you execute a task, giving you greater control and management over your task executions. +* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.18.3](service/efs/CHANGELOG.md#v1183-2022-12-16) + * **Documentation**: General documentation updates for EFS. +* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.16.6](service/guardduty/CHANGELOG.md#v1166-2022-12-16) + * **Documentation**: This release provides the valid characters for the Description and Name field. +* `github.com/aws/aws-sdk-go-v2/service/iotfleetwise`: [v1.2.0](service/iotfleetwise/CHANGELOG.md#v120-2022-12-16) + * **Feature**: Updated error handling for empty resource names in "UpdateSignalCatalog" and "GetModelManifest" operations. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.58.0](service/sagemaker/CHANGELOG.md#v1580-2022-12-16) + * **Feature**: AWS sagemaker - Features: This release adds support for random seed, it's an integer value used to initialize a pseudo-random number generator. Setting a random seed will allow the hyperparameter tuning search strategies to produce more consistent configurations for the same tuning job. + # Release (2022-12-15) ## General Highlights @@ -6199,7 +9673,7 @@ * Fixes [issue#1191](https://github.com/aws/aws-sdk-go-v2/issues/1191) * Refactored internal endpoints model for accessors * Feature: updated to latest models -* New services +* New services * `service/location` - v1.0.0 * `service/lookoutmetrics` - v1.0.0 ## Core SDK Highlights @@ -6283,7 +9757,7 @@ of the AWS SDK for Go v2. Version 2 incorporates customer feedback from version ## Breaking Changes * `aws`: Updated Config.Retryer member to be a func that returns aws.Retryer ([#1033](https://github.com/aws/aws-sdk-go-v2/pull/1033)) - * Updates the SDK's references to Config.Retryer to be a function that returns aws.Retryer value. This ensures that custom retry options specified in the `aws.Config` are scoped to individual client instances. + * Updates the SDK's references to Config.Retryer to be a function that returns aws.Retryer value. This ensures that custom retry options specified in the `aws.Config` are scoped to individual client instances. * All API clients created with the config will call the `Config.Retryer` function to get an aws.Retryer. * Removes duplicate `Retryer` interface from `retry` package. Single definition is `aws.Retryer` now. * `aws/middleware`: Updates `AddAttemptClockSkewMiddleware` to use appropriate `AddRecordResponseTiming` naming ([#1031](https://github.com/aws/aws-sdk-go-v2/pull/1031)) @@ -6291,7 +9765,7 @@ of the AWS SDK for Go v2. 
Version 2 incorporates customer feedback from version * `config`: Updated the `WithRetryer` helper to take a function that returns an aws.Retryer ([#1033](https://github.com/aws/aws-sdk-go-v2/pull/1033)) * All API clients created with the config will call the `Config.Retryer` function to get an aws.Retryer. * `API Clients`: Fix SDK's API client enum constant name generation to have expected casing ([#1020](https://github.com/aws/aws-sdk-go-v2/pull/1020)) - * This updates of the generated enum const value names in API client's `types` package to have the expected casing. Prior to this, enum names were being generated with lowercase names instead of camel case. + * This updates of the generated enum const value names in API client's `types` package to have the expected casing. Prior to this, enum names were being generated with lowercase names instead of camel case. * `API Clients`: Updates SDK's API client request middleware stack values to be scoped to individual operation call ([#1019](https://github.com/aws/aws-sdk-go-v2/pull/1019)) * The API client request middleware stack values were mistakenly allowed to escape to nested API operation calls. This broke the SDK's presigners. * Stack values that should not escape are not scoped to the individual operation call. @@ -6303,7 +9777,7 @@ of the AWS SDK for Go v2. Version 2 incorporates customer feedback from version * Adds a PresignClient to the `sts` API client module. Use PresignGetCallerIdentity to obtain presigned URLs for the create presigned URLs for the GetCallerIdentity operation. * Fixes [#1021](https://github.com/aws/aws-sdk-go-v2/issues/1021) * `aws/retry`: Add package documentation for retry package ([#1033](https://github.com/aws/aws-sdk-go-v2/pull/1033)) - * Adds documentation for the retry package + * Adds documentation for the retry package ## Bug Fixes * `Multiple API Clients`: Fix SDK's generated serde for unmodeled operation input/output ([#1050](https://github.com/aws/aws-sdk-go-v2/pull/1050)) @@ -6401,7 +9875,6 @@ feedback and to take advantage of modern Go language features. * Add support for reading `s3_use_arn_region` from shared config file ([#991](https://github.com/aws/aws-sdk-go-v2/pull/991)) * Add Utility for getting RequestID and HostID of response ([#983](https://github.com/aws/aws-sdk-go-v2/pull/983)) - ## Other changes * Updates branch `HEAD` points from `master` to `main`. * This should not impact your application, but if you have pull requests or forks of the SDK you may need to update the upstream branch your fork is based off of. @@ -6432,7 +9905,6 @@ The `config#LoadDefaultConfig` function has been updated to require a `context.C The v2 SDK corrects its behavior to be inline with the AWS CLI and other AWS SDKs. Refer to https://docs.aws.amazon.com/credref/latest/refdocs/overview.html for more information how to use the shared config and credentials files. - # Release 2020-11-30 ## Breaking Change @@ -6592,7 +10064,6 @@ The `config` module's exported types were trimmed down to add clarity and reduce * `credentials` module released at `v0.1.1` * `ec2imds` module released at `v0.1.1` - # Release 2020-09-28 ## Announcements We’re happy to share the updated clients for the v0.25.0 preview version of the AWS SDK for Go V2. @@ -6632,7 +10103,6 @@ As a part of the refactoring done to v2 preview SDK some components have not bee We expect additional breaking changes to the v2 preview SDK in the coming releases. 
We expect these changes to focus on organizational, naming, and hardening the SDK's design for future feature capabilities after it is released for general availability. - #### Relocated Packages In this release packages within the SDK were relocated, and in some cases those packages were converted to Go modules. The following is a list of packages have were relocated. @@ -6647,7 +10117,6 @@ The `github.com/aws/aws-sdk-go-v2/credentials` module contains refactored creden * `github.com/aws/aws-sdk-go-v2/processcreds` => `github.com/aws/aws-sdk-go-v2/credentials/processcreds` * `github.com/aws/aws-sdk-go-v2/stscreds` => `github.com/aws/aws-sdk-go-v2/credentials/stscreds` - #### Modularization New modules were added to the v2 preview SDK to allow the components to be versioned independently from each other. This allows your application to depend on specific versions of an API client module, and take discrete updates from the SDK core and other API client modules as desired. @@ -6656,7 +10125,6 @@ New modules were added to the v2 preview SDK to allow the components to be versi * [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials) * Module for each API client, e.g. [github.com/aws/aws-sdk-go-v2/service/s3](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/s3) - #### API Clients The following is a list of the major changes to the API client modules @@ -6676,14 +10144,12 @@ result, err := client.Scan(context.TODO(), &dynamodb.ScanInput{ }) ``` - #### Configuration In addition to the `github.com/aws/aws-sdk-go-v2/aws/external` package being made a module at `github.com/aws/aws-sdk-go-v2/config`, the `LoadDefaultAWSConfig` function was renamed to `LoadDefaultConfig`. The `github.com/aws/aws-sdk-go-v2/aws/defaults` package has been removed. Its components have been migrated to the `github.com/aws/aws-sdk-go-v2/aws` package, and `github.com/aws/aws-sdk-go-v2/config` module. - #### Error Handling The `github.com/aws/aws-sdk-go-v2/aws/awserr` package was removed as a part of the SDK error handling refactor. The SDK now uses typed errors built around [Go v1.13](https://golang.org/doc/go1.13#error_wrapping)'s [errors.As](https://pkg.go.dev/errors#As) and [errors.Unwrap](https://pkg.go.dev/errors#Unwrap) features. All SDK error types that wrap other errors implement the `Unwrap` method. Generic v2 preview SDK errors created with `fmt.Errorf` use `%w` to wrap the underlying error. @@ -6726,7 +10192,6 @@ Logging an error value will include information from each wrapped error. For exa > 2020/10/15 16:03:37 operation error DynamoDB: Scan, https response error StatusCode: 400, RequestID: ABCREQUESTID123, ResourceNotFoundException: Requested resource not found - #### Endpoints The `github.com/aws/aws-sdk-go-v2/aws/endpoints` has been removed from the SDK, along with all exported endpoint definitions and iteration behavior. Each generated API client now includes its own endpoint definition internally to the module. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/CODE_OF_CONDUCT.md b/vendor/github.com/aws/aws-sdk-go-v2/CODE_OF_CONDUCT.md index 3b644668..5b627cfa 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/CODE_OF_CONDUCT.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/CODE_OF_CONDUCT.md @@ -1,4 +1,4 @@ ## Code of Conduct -This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 
-For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact +This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). +For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact opensource-codeofconduct@amazon.com with any additional questions or comments. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/CONTRIBUTING.md b/vendor/github.com/aws/aws-sdk-go-v2/CONTRIBUTING.md index c2fc3b8f..5e59bba7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/CONTRIBUTING.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/CONTRIBUTING.md @@ -14,29 +14,28 @@ Jump To: * [Feature Requests](#feature-requests) * [Code Contributions](#code-contributions) - ## How to contribute *Before you send us a pull request, please be sure that:* -1. You're working from the latest source on the master branch. -2. You check existing open, and recently closed, pull requests to be sure +1. You're working from the latest source on the `main` branch. +2. You check existing open, and recently closed, pull requests to be sure that someone else hasn't already addressed the problem. -3. You create an issue before working on a contribution that will take a +3. You create an issue before working on a contribution that will take a significant amount of your time. *Creating a Pull Request* 1. Fork the repository. -2. In your fork, make your change in a branch that's based on this repo's master branch. +2. In your fork, make your change in a branch that's based on this repo's `main` branch. 3. Commit the change to your fork, using a clear and descriptive commit message. 4. Create a pull request, answering any questions in the pull request form. -For contributions that will take a significant amount of time, open a new -issue to pitch your idea before you get started. Explain the problem and -describe the content you want to see added to the documentation. Let us know -if you'll write it yourself or if you'd like us to help. We'll discuss your -proposal with you and let you know whether we're likely to accept it. +For contributions that will take a significant amount of time, open a new +issue to pitch your idea before you get started. Explain the problem and +describe the content you want to see added to the documentation. Let us know +if you'll write it yourself or if you'd like us to help. We'll discuss your +proposal with you and let you know whether we're likely to accept it. ## Bug Reports @@ -74,9 +73,9 @@ guidelines prior to filing a bug report. Open an [issue][issues] with the following: -* A short, descriptive title. Ideally, other community members should be able +* A short, descriptive title. Ideally, other community members should be able to get a good idea of the feature just from reading the title. -* A detailed description of the the proposed feature. +* A detailed description of the the proposed feature. * Why it should be added to the SDK. * If possible, example code to illustrate how it should work. * Use Markdown to make the request easier to read; @@ -97,7 +96,7 @@ Please be aware of the following notes prior to opening a pull request: 3. Wherever possible, pull requests should contain tests as appropriate. Bugfixes should contain tests that exercise the corrected behavior (i.e., the - test should fail without the bugfix and pass with it), and new features + test should fail without the bugfix and pass with it), and new features should be accompanied by tests exercising the feature. 
4. Pull requests that contain failing tests will not be merged until the test @@ -112,7 +111,7 @@ Please be aware of the following notes prior to opening a pull request: ### Testing -To run the tests locally, running the `make unit` command will `go get` the +To run the tests locally, running the `make unit` command will `go get` the SDK's testing dependencies, and run vet, link and unit tests for the SDK. ``` @@ -129,7 +128,7 @@ go test -tags codegen ./private/... See the `Makefile` for additional testing tags that can be used in testing. -To test on multiple platform the SDK includes several DockerFiles under the +To test on multiple platform the SDK includes several DockerFiles under the `awstesting/sandbox` folder, and associated make recipes to to execute unit testing within environments configured for specific Go versions. @@ -170,9 +169,9 @@ This will result in a patch version change. * `SDK Bugs` - For minor changes that resolve an issue. This will result in a patch version change. -[issues]: https://github.com/aws/aws-sdk-go/issues -[pr]: https://github.com/aws/aws-sdk-go/pulls +[issues]: https://github.com/aws/aws-sdk-go-v2/issues +[pr]: https://github.com/aws/aws-sdk-go-v2/pulls [license]: http://aws.amazon.com/apache2.0/ [cla]: http://en.wikipedia.org/wiki/Contributor_License_Agreement -[releasenotes]: https://github.com/aws/aws-sdk-go/releases +[releasenotes]: https://github.com/aws/aws-sdk-go-v2/releases diff --git a/vendor/github.com/aws/aws-sdk-go-v2/DESIGN.md b/vendor/github.com/aws/aws-sdk-go-v2/DESIGN.md index 8490c7d6..4c9be94a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/DESIGN.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/DESIGN.md @@ -12,4 +12,4 @@ Past Discussions --- The issues listed here are for documentation purposes, and is used to capture issues and their associated discussions. -[Code of Conduct]: https://github.com/aws/aws-sdk-go-v2/blob/master/CODE_OF_CONDUCT.md +[Code of Conduct]: https://github.com/aws/aws-sdk-go-v2/blob/main/CODE_OF_CONDUCT.md diff --git a/vendor/github.com/aws/aws-sdk-go-v2/Makefile b/vendor/github.com/aws/aws-sdk-go-v2/Makefile index 4bc9dfaf..4f74a265 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/Makefile +++ b/vendor/github.com/aws/aws-sdk-go-v2/Makefile @@ -246,7 +246,6 @@ unit-race-modules-%: "go test ${BUILD_TAGS} ${RUN_NONE} ./..." \ "go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./..." - unit-modules-%: @# unit command that uses the pattern to define the root path that the @# module testing will start from. Strips off the "unit-modules-" and @@ -408,7 +407,6 @@ bench-modules-%: && go run . -p $(subst _,/,$(subst bench-modules-,,$@)) ${EACHMODULE_FLAGS} \ "go test -timeout=10m -bench . --benchmem ${BUILD_TAGS} ${RUN_NONE} ./..." - ##################### # Release Process # ##################### @@ -499,14 +497,22 @@ list-deps-%: ################### .PHONY: sandbox-tests sandbox-build-% sandbox-run-% sandbox-test-% update-aws-golang-tip -sandbox-tests: sandbox-test-go1.15 sandbox-test-go1.16 sandbox-test-go1.17 sandbox-test-gotip +sandbox-tests: sandbox-test-go1.15 sandbox-test-go1.16 sandbox-test-go1.17 sandbox-test-go1.18 sandbox-test-go1.19 sandbox-test-go1.20 sandbox-test-gotip sandbox-build-%: @# sandbox-build-go1.17 @# sandbox-build-gotip - docker build \ - -f ./internal/awstesting/sandbox/Dockerfile.test.$(subst sandbox-build-,,$@) \ - -t "aws-sdk-go-$(subst sandbox-build-,,$@)" . 
+ @if [ $@ == sandbox-build-gotip ]; then\ + docker build \ + -f ./internal/awstesting/sandbox/Dockerfile.test.gotip \ + -t "aws-sdk-go-$(subst sandbox-build-,,$@)" . ;\ + else\ + docker build \ + --build-arg GO_VERSION=$(subst sandbox-build-go,,$@) \ + -f ./internal/awstesting/sandbox/Dockerfile.test.goversion \ + -t "aws-sdk-go-$(subst sandbox-build-,,$@)" . ;\ + fi + sandbox-run-%: sandbox-build-% @# sandbox-run-go1.17 @# sandbox-run-gotip diff --git a/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt index 5f14d116..899129ec 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt +++ b/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt @@ -1,3 +1,3 @@ AWS SDK for Go -Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. Copyright 2014-2015 Stripe, Inc. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/README.md b/vendor/github.com/aws/aws-sdk-go-v2/README.md index da74d0e3..54626706 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/README.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/README.md @@ -1,7 +1,6 @@ # AWS SDK for Go v2 -[![Go Build status](https://github.com/aws/aws-sdk-go-v2/actions/workflows/go.yml/badge.svg?branch=main)](https://github.com/aws/aws-sdk-go-v2/actions/workflows/go.yml)[![Codegen Build status](https://github.com/aws/aws-sdk-go-v2/actions/workflows/codegen.yml/badge.svg?branch=main)](https://github.com/aws/aws-sdk-go-v2/actions/workflows/codegen.yml) [![SDK Documentation](https://img.shields.io/badge/SDK-Documentation-blue)](https://aws.github.io/aws-sdk-go-v2/docs/) [![Migration Guide](https://img.shields.io/badge/Migration-Guide-blue)](https://aws.github.io/aws-sdk-go-v2/docs/migrating/) [![API Reference](https://img.shields.io/badge/api-reference-blue.svg)](https://pkg.go.dev/mod/github.com/aws/aws-sdk-go-v2) [![Apache V2 License](https://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt) - +[![Go Build status](https://github.com/aws/aws-sdk-go-v2/actions/workflows/go.yml/badge.svg?branch=main)](https://github.com/aws/aws-sdk-go-v2/actions/workflows/go.yml)[![Codegen Build status](https://github.com/aws/aws-sdk-go-v2/actions/workflows/codegen.yml/badge.svg?branch=main)](https://github.com/aws/aws-sdk-go-v2/actions/workflows/codegen.yml) [![SDK Documentation](https://img.shields.io/badge/SDK-Documentation-blue)](https://aws.github.io/aws-sdk-go-v2/docs/) [![Migration Guide](https://img.shields.io/badge/Migration-Guide-blue)](https://aws.github.io/aws-sdk-go-v2/docs/migrating/) [![API Reference](https://img.shields.io/badge/api-reference-blue.svg)](https://pkg.go.dev/mod/github.com/aws/aws-sdk-go-v2) [![Apache V2 License](https://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/aws/aws-sdk-go-v2/blob/main/LICENSE.txt) `aws-sdk-go-v2` is the v2 AWS SDK for the Go programming language. @@ -87,7 +86,7 @@ func main() { ###### Compile and Execute ```sh $ go run . -Table: +Tables: tableOne tableTwo ``` @@ -97,9 +96,9 @@ tableTwo Please use these community resources for getting help. We use the GitHub issues for tracking bugs and feature requests. -* Ask a question on [StackOverflow](http://stackoverflow.com/) and tag it with the [`aws-sdk-go`](http://stackoverflow.com/questions/tagged/aws-sdk-go) tag. -* Open a support ticket with [AWS Support](http://docs.aws.amazon.com/awssupport/latest/user/getting-started.html). 
+* Ask us a [question](https://github.com/aws/aws-sdk-go-v2/discussions/new?category=q-a) or open a [discussion](https://github.com/aws/aws-sdk-go-v2/discussions/new?category=general). * If you think you may have found a bug, please open an [issue](https://github.com/aws/aws-sdk-go-v2/issues/new/choose). +* Open a support ticket with [AWS Support](http://docs.aws.amazon.com/awssupport/latest/user/getting-started.html). This SDK implements AWS service APIs. For general issues regarding the AWS services and their limitations, you may also take a look at the [Amazon Web Services Discussion Forums](https://forums.aws.amazon.com/). @@ -118,7 +117,7 @@ Keeping the list of open issues lean will help us respond in a timely manner. ## Feedback and contributing -The v2 SDK will use GitHub [Issues] to track feature requests and issues with the SDK. In addition, we'll use GitHub [Projects] to track large tasks spanning multiple pull requests, such as refactoring the SDK's internal request lifecycle. You can provide feedback to us in several ways. +The v2 SDK will use GitHub [Issues] to track feature requests and issues with the SDK. In addition, we'll use GitHub [Projects] to track large tasks spanning multiple pull requests, such as refactoring the SDK's internal request lifecycle. You can provide feedback to us in several ways. **GitHub issues**. To provide feedback or report bugs, file GitHub [Issues] on the SDK. This is the preferred mechanism to give feedback so that other users can engage in the conversation, +1 issues, etc. Issues you open will be evaluated, and included in our roadmap for the GA launch. @@ -139,8 +138,8 @@ API operation require parameters. [Service Documentation](https://aws.amazon.com/documentation/) - Use this documentation to learn how to interface with AWS services. These guides are -great for getting started with a service, or when looking for more -information about a service. While this document is not required for coding, +great for getting started with a service, or when looking for more +information about a service. While this document is not required for coding, services may supply helpful samples to look out for. [Forum](https://forums.aws.amazon.com/forum.jspa?forumID=293) - Ask questions, get help, and give feedback @@ -151,7 +150,7 @@ services may supply helpful samples to look out for. 
[Dep]: https://github.com/golang/dep [Issues]: https://github.com/aws/aws-sdk-go-v2/issues [Projects]: https://github.com/aws/aws-sdk-go-v2/projects -[CHANGELOG]: https://github.com/aws/aws-sdk-go-v2/blob/master/CHANGELOG.md +[CHANGELOG]: https://github.com/aws/aws-sdk-go-v2/blob/main/CHANGELOG.md [Amazon DynamoDB]: https://aws.amazon.com/dynamodb/ -[design]: https://github.com/aws/aws-sdk-go-v2/blob/master/DESIGN.md +[design]: https://github.com/aws/aws-sdk-go-v2/blob/main/DESIGN.md [license]: http://aws.amazon.com/apache2.0/ diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go index 37c643dd..abee83b7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go @@ -3,4 +3,4 @@ package aws // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.17.3" +const goModuleVersion = "1.18.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/recursion_detection.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/recursion_detection.go new file mode 100644 index 00000000..3f6aaf23 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/recursion_detection.go @@ -0,0 +1,94 @@ +package middleware + +import ( + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "os" +) + +const envAwsLambdaFunctionName = "AWS_LAMBDA_FUNCTION_NAME" +const envAmznTraceID = "_X_AMZN_TRACE_ID" +const amznTraceIDHeader = "X-Amzn-Trace-Id" + +// AddRecursionDetection adds recursionDetection to the middleware stack +func AddRecursionDetection(stack *middleware.Stack) error { + return stack.Build.Add(&RecursionDetection{}, middleware.After) +} + +// RecursionDetection detects Lambda environment and sets its X-Ray trace ID to request header if absent +// to avoid recursion invocation in Lambda +type RecursionDetection struct{} + +// ID returns the middleware identifier +func (m *RecursionDetection) ID() string { + return "RecursionDetection" +} + +// HandleBuild detects Lambda environment and adds its trace ID to request header if absent +func (m *RecursionDetection) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown request type %T", req) + } + + _, hasLambdaEnv := os.LookupEnv(envAwsLambdaFunctionName) + xAmznTraceID, hasTraceID := os.LookupEnv(envAmznTraceID) + value := req.Header.Get(amznTraceIDHeader) + // only set the X-Amzn-Trace-Id header when it is not set initially, the + // current environment is Lambda and the _X_AMZN_TRACE_ID env variable exists + if value != "" || !hasLambdaEnv || !hasTraceID { + return next.HandleBuild(ctx, in) + } + + req.Header.Set(amznTraceIDHeader, percentEncode(xAmznTraceID)) + return next.HandleBuild(ctx, in) +} + +func percentEncode(s string) string { + upperhex := "0123456789ABCDEF" + hexCount := 0 + for i := 0; i < len(s); i++ { + c := s[i] + if shouldEncode(c) { + hexCount++ + } + } + + if hexCount == 0 { + return s + } + + required := len(s) + 2*hexCount + t := make([]byte, required) + j := 0 + for i := 0; i < len(s); i++ { + if c := s[i]; shouldEncode(c) { + t[j] = '%' + t[j+1] = upperhex[c>>4] + t[j+2] = upperhex[c&15] + j += 3 + } else { + t[j] = c + j++ + } + } + return 
string(t) +} + +func shouldEncode(c byte) bool { + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { + return false + } + switch c { + case '-', '=', ';', ':', '+', '&', '[', ']', '{', '}', '"', '\'', ',': + return false + default: + return true + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go index 9d7d3a0c..47ebc0f5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go @@ -36,20 +36,31 @@ type Array struct { memberName string // Elements are stored in values, so we keep track of the list size here. size int32 + // Empty lists are encoded as "=", if we add a value later we will + // remove this encoding + emptyValue Value } func newArray(values url.Values, prefix string, flat bool, memberName string) *Array { + emptyValue := newValue(values, prefix, flat) + emptyValue.String("") + return &Array{ values: values, prefix: prefix, flat: flat, memberName: memberName, + emptyValue: emptyValue, } } // Value adds a new element to the Query Array. Returns a Value type used to // encode the array element. func (a *Array) Value() Value { + if a.size == 0 { + delete(a.values, a.emptyValue.key) + } + // Query lists start a 1, so adjust the size first a.size++ prefix := a.prefix diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go index c228f7d8..6975ce65 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go @@ -21,26 +21,18 @@ func GetErrorResponseComponents(r io.Reader, noErrorWrapping bool) (ErrorCompone if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF { return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err) } - return ErrorComponents{ - Code: errResponse.Code, - Message: errResponse.Message, - RequestID: errResponse.RequestID, - }, nil + return ErrorComponents(errResponse), nil } var errResponse wrappedErrorResponse if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF { return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err) } - return ErrorComponents{ - Code: errResponse.Code, - Message: errResponse.Message, - RequestID: errResponse.RequestID, - }, nil + return ErrorComponents(errResponse), nil } // noWrappedErrorResponse represents the error response body with -// no internal ... 
+// wrapped within Error type wrappedErrorResponse struct { Code string `xml:"Error>Code"` Message string `xml:"Error>Message"` diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go index 12a3f0c4..d89090ad 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go @@ -30,10 +30,6 @@ func NewTokenRateLimit(tokens uint) *TokenRateLimit { } } -func isTimeoutError(error) bool { - return false -} - type canceledError struct { Err error } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go index 3326289a..822fc920 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go @@ -11,7 +11,6 @@ import ( awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/internal/sdk" "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" smithymiddle "github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/transport/http" ) @@ -292,7 +291,7 @@ type retryMetadataKey struct{} // Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues // to clear all stack values. func getRetryMetadata(ctx context.Context) (metadata retryMetadata, ok bool) { - metadata, ok = middleware.GetStackValue(ctx, retryMetadataKey{}).(retryMetadata) + metadata, ok = smithymiddle.GetStackValue(ctx, retryMetadataKey{}).(retryMetadata) return metadata, ok } @@ -301,7 +300,7 @@ func getRetryMetadata(ctx context.Context) (metadata retryMetadata, ok bool) { // Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues // to clear all stack values. func setRetryMetadata(ctx context.Context, metadata retryMetadata) context.Context { - return middleware.WithStackValue(ctx, retryMetadataKey{}, metadata) + return smithymiddle.WithStackValue(ctx, retryMetadataKey{}, metadata) } // AddRetryMiddlewaresOptions is the set of options that can be passed to diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go index c695e6fe..00d7d3ee 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go @@ -95,8 +95,13 @@ func (r RetryableConnectionError) IsErrorRetryable(err error) aws.Ternary { var timeoutErr interface{ Timeout() bool } var urlErr *url.Error var netOpErr *net.OpError + var dnsError *net.DNSError switch { + case errors.As(err, &dnsError): + // NXDOMAIN errors should not be retried + retryable = !dnsError.IsNotFound && dnsError.IsTemporary + case errors.As(err, &conErr) && conErr.ConnectionError(): retryable = true diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go index 6777e21e..b0ba4cb2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go @@ -54,7 +54,7 @@ type Retryer interface { MaxAttempts() int // RetryDelay returns the delay that should be used before retrying the - // attempt. Will return error if the if the delay could not be determined. + // attempt. Will return error if the delay could not be determined. 
RetryDelay(attempt int, opErr error) (time.Duration, error) // GetRetryToken attempts to deduct the retry cost from the retry token pool. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go index 85a1d8f0..64c4c484 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go @@ -7,6 +7,7 @@ var IgnoredHeaders = Rules{ "Authorization": struct{}{}, "User-Agent": struct{}{}, "X-Amzn-Trace-Id": struct{}{}, + "Expect": struct{}{}, }, }, } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md index 04278678..325779c0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md @@ -1,3 +1,79 @@ +# v1.18.27 (2023-06-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.26 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.25 (2023-05-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.24 (2023-05-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.23 (2023-05-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.22 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.21 (2023-04-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.20 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.19 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.18 (2023-03-16) + +* **Bug Fix**: Allow RoleARN to be set as functional option on STS WebIdentityRoleOptions. Fixes aws/aws-sdk-go-v2#2015. 
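The v1.18.18 bug-fix entry directly above (its `resolve_credentials.go` change appears later in this patch) lets the web identity role ARN come from the `stscreds.WebIdentityRoleOptions` functional options rather than only from the environment or shared config. A minimal sketch, assuming the `config.WithWebIdentityRoleCredentialOptions` helper; the role ARN and the reliance on `AWS_WEB_IDENTITY_TOKEN_FILE` are illustrative assumptions.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
)

func main() {
	// The web identity token file still comes from the environment or shared
	// config (e.g. AWS_WEB_IDENTITY_TOKEN_FILE); only the role ARN is set here.
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithWebIdentityRoleCredentialOptions(func(o *stscreds.WebIdentityRoleOptions) {
			o.RoleARN = "arn:aws:iam::123456789012:role/example-role" // hypothetical ARN
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = cfg // hand cfg to any service client constructor as usual
}
```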
+ +# v1.18.17 (2023-03-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.16 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.15 (2023-02-22) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.14 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.13 (2023-02-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.12 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.11 (2023-02-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.10 (2023-01-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.9 (2023-01-23) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.18.8 (2023-01-05) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go index 89734b1e..9c6d1721 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go @@ -3,4 +3,4 @@ package config // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.18.8" +const goModuleVersion = "1.18.27" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go index 1bb6addf..b21cd308 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go @@ -384,10 +384,6 @@ func assumeWebIdentity(ctx context.Context, cfg *aws.Config, filepath string, ro return fmt.Errorf("token file path is not set") } - if len(roleARN) == 0 { - return fmt.Errorf("role ARN is not set") - } - optFns := []func(*stscreds.WebIdentityRoleOptions){ func(options *stscreds.WebIdentityRoleOptions) { options.RoleSessionName = sessionName @@ -398,11 +394,29 @@ func assumeWebIdentity(ctx context.Context, cfg *aws.Config, filepath string, ro if err != nil { return err } + if found { optFns = append(optFns, optFn) } - provider := stscreds.NewWebIdentityRoleProvider(sts.NewFromConfig(*cfg), roleARN, stscreds.IdentityTokenFile(filepath), optFns...) + opts := stscreds.WebIdentityRoleOptions{ + RoleARN: roleARN, + } + + for _, fn := range optFns { + fn(&opts) + } + + if len(opts.RoleARN) == 0 { + return fmt.Errorf("role ARN is not set") + } + + client := opts.Client + if client == nil { + client = sts.NewFromConfig(*cfg) + } + + provider := stscreds.NewWebIdentityRoleProvider(client, roleARN, stscreds.IdentityTokenFile(filepath), optFns...) cfg.Credentials = provider diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md index 91d150e6..0b0958bd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md @@ -1,3 +1,75 @@ +# v1.13.26 (2023-06-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.25 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.24 (2023-05-09) + +* No change notes available for this release. 
+ +# v1.13.23 (2023-05-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.22 (2023-05-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.21 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.20 (2023-04-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.19 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.18 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.17 (2023-03-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.16 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.15 (2023-02-22) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.14 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.13 (2023-02-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.12 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.11 (2023-02-01) + +* No change notes available for this release. + +# v1.13.10 (2023-01-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.9 (2023-01-23) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.13.8 (2023-01-05) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go index 72214bf4..6ed71b42 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go @@ -11,7 +11,7 @@ // # Loading credentials with the SDK's AWS Config // // The EC2 Instance role credentials provider will automatically be the resolved -// credential provider int he credential chain if no other credential provider is +// credential provider in the credential chain if no other credential provider is // resolved first. // // To explicitly instruct the SDK's credentials resolving to use the EC2 Instance diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go index 8cbe05de..878dfaf6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go @@ -3,4 +3,4 @@ package credentials // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.13.8" +const goModuleVersion = "1.13.26" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go index 3921da34..fe9345e2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go @@ -149,12 +149,24 @@ func NewProviderCommand(builder NewCommandBuilder, options ...func(*Options)) *P return p } -type credentialProcessResponse struct { - Version int - AccessKeyID string `json:"AccessKeyId"` +// A CredentialProcessResponse is the AWS credentials format that must be +// returned when executing an external credential_process. 
+type CredentialProcessResponse struct { + // As of this writing, the Version key must be set to 1. This might + // increment over time as the structure evolves. + Version int + + // The access key ID that identifies the temporary security credentials. + AccessKeyID string `json:"AccessKeyId"` + + // The secret access key that can be used to sign requests. SecretAccessKey string - SessionToken string - Expiration *time.Time + + // The token that users must pass to the service API to use the temporary credentials. + SessionToken string + + // The date on which the current credentials expire. + Expiration *time.Time } // Retrieve executes the credential process command and returns the @@ -166,7 +178,7 @@ func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) { } // Serialize and validate response - resp := &credentialProcessResponse{} + resp := &CredentialProcessResponse{} if err = json.Unmarshal(out, resp); err != nil { return aws.Credentials{Source: ProviderName}, &ProviderError{ Err: fmt.Errorf("parse failed of process output: %s, error: %w", out, err), diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go index 43e5676d..ece1e65f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go @@ -11,12 +11,11 @@ // # Loading AWS SSO credentials with the AWS shared configuration file // // You can use configure AWS SSO credentials from the AWS shared configuration file by -// providing the specifying the required keys in the profile: +// specifying the required keys in the profile and referencing an sso-session: // +// sso_session // sso_account_id -// sso_region // sso_role_name -// sso_start_url // // For example, the following defines a profile "devsso" and specifies the AWS // SSO parameters that defines the target account, role, sign-on portal, and @@ -24,11 +23,15 @@ // provided, or an error will be returned. // // [profile devsso] -// sso_start_url = https://my-sso-portal.awsapps.com/start +// sso_session = dev-session // sso_role_name = SSOReadOnlyRole -// sso_region = us-east-1 // sso_account_id = 123456789012 // +// [sso-session dev-session] +// sso_start_url = https://my-sso-portal.awsapps.com/start +// sso_region = us-east-1 +// sso_registration_scopes = sso:account:access +// // Using the config module, you can load the AWS SDK shared configuration, and // specify that this profile be used to retrieve credentials. For example: // @@ -43,10 +46,17 @@ // and provide the necessary information to load and retrieve temporary // credentials using an access token from ~/.aws/sso/cache. 
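Editor's note: since the `CredentialProcessResponse` type exported in the processcreds hunk above is now public, here is a hedged sketch of the JSON an external `credential_process` is expected to print and how it maps onto that struct. All credential values are placeholders, not real secrets:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/credentials/processcreds"
)

func main() {
	// Example payload a credential_process command might write to stdout.
	out := []byte(`{
		"Version": 1,
		"AccessKeyId": "AKIDEXAMPLE",
		"SecretAccessKey": "secretExample",
		"SessionToken": "tokenExample",
		"Expiration": "2023-07-25T12:00:00Z"
	}`)

	var resp processcreds.CredentialProcessResponse
	if err := json.Unmarshal(out, &resp); err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Version, resp.AccessKeyID, resp.Expiration)
}
```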
// -// client := sso.NewFromConfig(cfg) +// ssoClient := sso.NewFromConfig(cfg) +// ssoOidcClient := ssooidc.NewFromConfig(cfg) +// tokenPath, err := ssocreds.StandardCachedTokenFilepath("dev-session") +// if err != nil { +// return err +// } // // var provider aws.CredentialsProvider -// provider = ssocreds.New(client, "123456789012", "SSOReadOnlyRole", "us-east-1", "https://my-sso-portal.awsapps.com/start") +// provider = ssocreds.New(ssoClient, "123456789012", "SSOReadOnlyRole", "https://my-sso-portal.awsapps.com/start", func(options *ssocreds.Options) { +// options.SSOTokenProvider = ssocreds.NewSSOTokenProvider(ssoOidcClient, tokenPath) +// }) // // // Wrap the provider with aws.CredentialsCache to cache the credentials until their expire time // provider = aws.NewCredentialsCache(provider) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md index 2230c418..b4d50c42 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md @@ -1,3 +1,35 @@ +# v1.13.4 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.3 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.2 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.1 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.0 (2023-03-14) + +* **Feature**: Add flag to disable IMDSv1 fallback + +# v1.12.24 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.23 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.22 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.12.21 (2022-12-15) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go index f97730bd..e55edd99 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go @@ -174,6 +174,16 @@ type Options struct { // The logger writer interface to write logging messages to. Logger logging.Logger + // Configure IMDSv1 fallback behavior. By default, the client will attempt + // to fall back to IMDSv1 as needed for backwards compatibility. When set to [aws.FalseTernary] + // the client will return any errors encountered from attempting to fetch a token + // instead of silently using the insecure data flow of IMDSv1. + // + // See [configuring IMDS] for more information. + // + // [configuring IMDS]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html + EnableFallback aws.Ternary + // provides the caching of API tokens used for operation calls. If unset, // the API token will not be retrieved for the operation. 
tokenProvider *tokenProvider diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go index de4ef3ef..6fab6e91 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go @@ -3,4 +3,4 @@ package imds // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.12.21" +const goModuleVersion = "1.13.4" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go index 275fade4..5703c6e1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go @@ -4,12 +4,14 @@ import ( "context" "errors" "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/smithy-go" + "github.com/aws/smithy-go/logging" "net/http" "sync" "sync/atomic" "time" - smithy "github.com/aws/smithy-go" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -68,7 +70,7 @@ func (t *tokenProvider) HandleFinalize( ) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - if !t.enabled() { + if t.fallbackEnabled() && !t.enabled() { // short-circuits to insecure data flow if token provider is disabled. return next.HandleFinalize(ctx, input) } @@ -115,23 +117,15 @@ func (t *tokenProvider) HandleDeserialize( } if resp.StatusCode == http.StatusUnauthorized { // unauthorized - err = &retryableError{Err: err} t.enable() + err = &retryableError{Err: err, isRetryable: true} } return out, metadata, err } -type retryableError struct { - Err error -} - -func (*retryableError) RetryableError() bool { return true } - -func (e *retryableError) Error() string { return e.Err.Error() } - func (t *tokenProvider) getToken(ctx context.Context) (tok *apiToken, err error) { - if !t.enabled() { + if t.fallbackEnabled() && !t.enabled() { return nil, &bypassTokenRetrievalError{ Err: fmt.Errorf("cannot get API token, provider disabled"), } @@ -147,7 +141,7 @@ func (t *tokenProvider) getToken(ctx context.Context) (tok *apiToken, err error) tok, err = t.updateToken(ctx) if err != nil { - return nil, fmt.Errorf("cannot get API token, %w", err) + return nil, err } return tok, nil @@ -167,17 +161,19 @@ func (t *tokenProvider) updateToken(ctx context.Context) (*apiToken, error) { TokenTTL: t.tokenTTL, }) if err != nil { - // change the disabled flag on token provider to true, when error is request timeout error. var statusErr interface{ HTTPStatusCode() int } if errors.As(err, &statusErr) { switch statusErr.HTTPStatusCode() { - - // Disable get token if failed because of 403, 404, or 405 + // Disable future get token if failed because of 403, 404, or 405 case http.StatusForbidden, http.StatusNotFound, http.StatusMethodNotAllowed: - t.disable() + if t.fallbackEnabled() { + logger := middleware.GetLogger(ctx) + logger.Logf(logging.Warn, "falling back to IMDSv1: %v", err) + t.disable() + } // 400 errors are terminal, and need to be upstreamed case http.StatusBadRequest: @@ -192,8 +188,17 @@ func (t *tokenProvider) updateToken(ctx context.Context) (*apiToken, error) { atomic.StoreUint32(&t.disabled, 1) } - // Token couldn't be retrieved, but bypass this, and allow the - // request to continue. 
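Editor's note: the fallback handling in the token provider changes above is driven by the new `EnableFallback` option on the imds client. A minimal sketch, under the assumption that the caller wants token errors surfaced rather than a silent IMDSv1 downgrade:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
)

func main() {
	// With EnableFallback set to FalseTernary, token retrieval failures are
	// returned as errors instead of falling back to the IMDSv1 data flow.
	client := imds.New(imds.Options{
		EnableFallback: aws.FalseTernary,
	})

	region, err := client.GetRegion(context.TODO(), &imds.GetRegionInput{})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("region:", region.Region)
}
```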
+ if !t.fallbackEnabled() { + // NOTE: getToken() is an implementation detail of some outer operation + // (e.g. GetMetadata). It has its own retries that have already been exhausted. + // Mark the underlying error as a terminal error. + err = &retryableError{Err: err, isRetryable: false} + return nil, err + } + + // Token couldn't be retrieved, fallback to IMDSv1 insecure flow for this request + // and allow the request to proceed. Future requests _may_ re-attempt fetching a + // token if not disabled. return nil, &bypassTokenRetrievalError{Err: err} } @@ -206,21 +211,21 @@ func (t *tokenProvider) updateToken(ctx context.Context) (*apiToken, error) { return tok, nil } -type bypassTokenRetrievalError struct { - Err error -} - -func (e *bypassTokenRetrievalError) Error() string { - return fmt.Sprintf("bypass token retrieval, %v", e.Err) -} - -func (e *bypassTokenRetrievalError) Unwrap() error { return e.Err } - // enabled returns if the token provider is current enabled or not. func (t *tokenProvider) enabled() bool { return atomic.LoadUint32(&t.disabled) == 0 } +// fallbackEnabled returns false if EnableFallback is [aws.FalseTernary], true otherwise +func (t *tokenProvider) fallbackEnabled() bool { + switch t.client.options.EnableFallback { + case aws.FalseTernary: + return false + default: + return true + } +} + // disable disables the token provider and it will no longer attempt to inject // the token, nor request updates. func (t *tokenProvider) disable() { @@ -235,3 +240,22 @@ func (t *tokenProvider) enable() { t.tokenMux.Unlock() atomic.StoreUint32(&t.disabled, 0) } + +type bypassTokenRetrievalError struct { + Err error +} + +func (e *bypassTokenRetrievalError) Error() string { + return fmt.Sprintf("bypass token retrieval, %v", e.Err) +} + +func (e *bypassTokenRetrievalError) Unwrap() error { return e.Err } + +type retryableError struct { + Err error + isRetryable bool +} + +func (e *retryableError) RetryableError() bool { return e.isRetryable } + +func (e *retryableError) Error() string { return e.Err.Error() } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md index cffa7288..297f1e42 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md @@ -1,3 +1,31 @@ +# v1.1.34 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.33 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.32 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.31 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.30 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.29 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.28 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.1.27 (2022-12-15) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go index b47c6baa..2948b315 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go @@ -3,4 +3,4 @@ package configsources // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.1.27" +const goModuleVersion = "1.1.34" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md index fb3d33ff..0ffc56c3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md @@ -1,3 +1,31 @@ +# v2.4.28 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.27 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.26 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.25 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.24 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.23 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.22 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + # v2.4.21 (2022-12-15) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go index e6a8286d..0a8b6901 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go @@ -3,4 +3,4 @@ package endpoints // goModuleVersion is the tagged release for this module -const goModuleVersion = "2.4.21" +const goModuleVersion = "2.4.28" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md index efd865d6..494c4dba 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md @@ -1,3 +1,31 @@ +# v1.3.35 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.34 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.33 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.32 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.31 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.30 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.29 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.3.28 (2022-12-15) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go index 96ac9fbe..47934362 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go @@ -3,4 +3,4 @@ package ini // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.3.28" +const goModuleVersion = "1.3.35" diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/CHANGELOG.md index 8eeb4fee..3d155118 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/CHANGELOG.md @@ -1,3 +1,80 @@ +# v1.18.11 (2023-05-04) + +* No change notes available for this release. + +# v1.18.10 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.9 (2023-04-10) + +* No change notes available for this release. + +# v1.18.8 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.7 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.6 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.5 (2023-02-22) + +* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. + +# v1.18.4 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.3 (2023-02-15) + +* **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. +* **Bug Fix**: Correct error type parsing for restJson services. + +# v1.18.2 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.1 (2023-01-23) + +* No change notes available for this release. + +# v1.18.0 (2023-01-05) + +* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). + +# v1.17.25 (2022-12-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.24 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.23 (2022-11-22) + +* No change notes available for this release. + +# v1.17.22 (2022-11-16) + +* No change notes available for this release. + +# v1.17.21 (2022-11-10) + +* No change notes available for this release. + +# v1.17.20 (2022-10-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.19 (2022-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.17.18 (2022-09-20) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_client.go index c51da783..add37d36 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_client.go @@ -115,7 +115,7 @@ type Options struct { Retryer aws.Retryer // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set - // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig. You + // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You // should not populate this structure programmatically, or rely on the values here // within your applications. 
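Editor's note: the ECR changelog entries above (the restJson error-type fix and the `ErrorCodeOverride` field) mostly matter to callers who branch on ECR error types. A hedged sketch of the usual pattern, using a placeholder repository name that may not exist:

```go
package main

import (
	"context"
	"errors"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ecr"
	"github.com/aws/aws-sdk-go-v2/service/ecr/types"
	"github.com/aws/smithy-go"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := ecr.NewFromConfig(cfg)

	_, err = client.DescribeRepositories(context.TODO(), &ecr.DescribeRepositoriesInput{
		RepositoryNames: []string{"does-not-exist"}, // placeholder
	})

	// Typed error, now parsed correctly for restJson services.
	var notFound *types.RepositoryNotFoundException
	if errors.As(err, &notFound) {
		log.Println("repository is missing:", notFound.ErrorMessage())
		return
	}
	// Generic service error; ErrorCode reflects the service-reported code.
	var apiErr smithy.APIError
	if errors.As(err, &apiErr) {
		log.Fatalf("ECR call failed: %s: %s", apiErr.ErrorCode(), apiErr.ErrorMessage())
	}
}
```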
RuntimeEnvironment aws.RuntimeEnvironment diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchCheckLayerAvailability.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchCheckLayerAvailability.go index cea7521f..13489da6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchCheckLayerAvailability.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchCheckLayerAvailability.go @@ -57,8 +57,8 @@ type BatchCheckLayerAvailabilityOutput struct { // Any failures associated with the call. Failures []types.LayerFailure - // A list of image layer objects corresponding to the image layer references in the - // request. + // A list of image layer objects corresponding to the image layer references in + // the request. Layers []types.Layer // Metadata pertaining to the operation's result. @@ -118,6 +118,9 @@ func (c *Client) addOperationBatchCheckLayerAvailabilityMiddlewares(stack *middl if err = stack.Initialize.Add(newServiceMetadataMiddleware_opBatchCheckLayerAvailability(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchDeleteImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchDeleteImage.go index 80c9d5b5..646f46a0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchDeleteImage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchDeleteImage.go @@ -12,7 +12,7 @@ import ( ) // Deletes a list of specified images within a repository. Images are specified -// with either an imageTag or imageDigest. You can remove a tag from an image by +// with either an imageTag or imageDigest . You can remove a tag from an image by // specifying the image's tag in your request. When you remove the last tag from an // image, the image is deleted from your repository. You can completely delete an // image (and all of its tags) by specifying the image's digest in your request. @@ -32,11 +32,11 @@ func (c *Client) BatchDeleteImage(ctx context.Context, params *BatchDeleteImageI } // Deletes specified images within a specified repository. Images are specified -// with either the imageTag or imageDigest. +// with either the imageTag or imageDigest . type BatchDeleteImageInput struct { - // A list of image ID references that correspond to images to delete. The format of - // the imageIds reference is imageTag=tag or imageDigest=digest. + // A list of image ID references that correspond to images to delete. The format + // of the imageIds reference is imageTag=tag or imageDigest=digest . // // This member is required. 
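Editor's note: to make the imageTag / imageDigest wording in the BatchDeleteImage doc above concrete, a small hedged sketch that deletes a single tag by building the `ImageIds` list. Repository and tag names are placeholders:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ecr"
	"github.com/aws/aws-sdk-go-v2/service/ecr/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := ecr.NewFromConfig(cfg)

	// Removing the last tag deletes the image; an imageDigest entry would
	// delete the image and all of its tags.
	out, err := client.BatchDeleteImage(context.TODO(), &ecr.BatchDeleteImageInput{
		RepositoryName: aws.String("project-a/nginx-web-app"), // placeholder
		ImageIds: []types.ImageIdentifier{
			{ImageTag: aws.String("stale-tag")}, // placeholder
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("deleted %d image IDs, %d failures", len(out.ImageIds), len(out.Failures))
}
```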
ImageIds []types.ImageIdentifier @@ -119,6 +119,9 @@ func (c *Client) addOperationBatchDeleteImageMiddlewares(stack *middleware.Stack if err = stack.Initialize.Add(newServiceMetadataMiddleware_opBatchDeleteImage(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchGetImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchGetImage.go index b2724f89..ce9b83a3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchGetImage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchGetImage.go @@ -12,7 +12,7 @@ import ( ) // Gets detailed information for an image. Images are specified with either an -// imageTag or imageDigest. When an image is pulled, the BatchGetImage API is +// imageTag or imageDigest . When an image is pulled, the BatchGetImage API is // called once to retrieve the image manifest. func (c *Client) BatchGetImage(ctx context.Context, params *BatchGetImageInput, optFns ...func(*Options)) (*BatchGetImageOutput, error) { if params == nil { @@ -32,7 +32,7 @@ func (c *Client) BatchGetImage(ctx context.Context, params *BatchGetImageInput, type BatchGetImageInput struct { // A list of image ID references that correspond to images to describe. The format - // of the imageIds reference is imageTag=tag or imageDigest=digest. + // of the imageIds reference is imageTag=tag or imageDigest=digest . // // This member is required. ImageIds []types.ImageIdentifier @@ -121,6 +121,9 @@ func (c *Client) addOperationBatchGetImageMiddlewares(stack *middleware.Stack, o if err = stack.Initialize.Add(newServiceMetadataMiddleware_opBatchGetImage(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchGetRepositoryScanningConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchGetRepositoryScanningConfiguration.go index 42adcbbc..3611dbe5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchGetRepositoryScanningConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchGetRepositoryScanningConfiguration.go @@ -102,6 +102,9 @@ func (c *Client) addOperationBatchGetRepositoryScanningConfigurationMiddlewares( if err = stack.Initialize.Add(newServiceMetadataMiddleware_opBatchGetRepositoryScanningConfiguration(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CompleteLayerUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CompleteLayerUpload.go index ac1fe8a3..de6185b9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CompleteLayerUpload.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CompleteLayerUpload.go @@ -129,6 +129,9 @@ func (c *Client) addOperationCompleteLayerUploadMiddlewares(stack *middleware.St if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCompleteLayerUpload(options.Region), middleware.Before); err != nil { 
return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CreatePullThroughCacheRule.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CreatePullThroughCacheRule.go index 6a67466e..75c3ee4e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CreatePullThroughCacheRule.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CreatePullThroughCacheRule.go @@ -122,6 +122,9 @@ func (c *Client) addOperationCreatePullThroughCacheRuleMiddlewares(stack *middle if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreatePullThroughCacheRule(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CreateRepository.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CreateRepository.go index 1f22e5c8..362fe54b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CreateRepository.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CreateRepository.go @@ -11,9 +11,8 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Creates a repository. For more information, see Amazon ECR repositories -// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/Repositories.html) in -// the Amazon Elastic Container Registry User Guide. +// Creates a repository. For more information, see Amazon ECR repositories (https://docs.aws.amazon.com/AmazonECR/latest/userguide/Repositories.html) +// in the Amazon Elastic Container Registry User Guide. func (c *Client) CreateRepository(ctx context.Context, params *CreateRepositoryInput, optFns ...func(*Options)) (*CreateRepositoryOutput, error) { if params == nil { params = &CreateRepositoryInput{} @@ -32,8 +31,8 @@ func (c *Client) CreateRepository(ctx context.Context, params *CreateRepositoryI type CreateRepositoryInput struct { // The name to use for the repository. The repository name may be specified on its - // own (such as nginx-web-app) or it can be prepended with a namespace to group the - // repository into a category (such as project-a/nginx-web-app). + // own (such as nginx-web-app ) or it can be prepended with a namespace to group + // the repository into a category (such as project-a/nginx-web-app ). // // This member is required. RepositoryName *string @@ -47,8 +46,8 @@ type CreateRepositoryInput struct { // repository. ImageScanningConfiguration *types.ImageScanningConfiguration - // The tag mutability setting for the repository. If this parameter is omitted, the - // default setting of MUTABLE will be used which will allow image tags to be + // The tag mutability setting for the repository. If this parameter is omitted, + // the default setting of MUTABLE will be used which will allow image tags to be // overwritten. If IMMUTABLE is specified, all image tags within the repository // will be immutable which will prevent them from being overwritten. 
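Editor's note: as a concrete reading of the `ImageTagMutability` text above, a hedged sketch that creates a namespaced repository with immutable tags. The repository name is a placeholder:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ecr"
	"github.com/aws/aws-sdk-go-v2/service/ecr/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := ecr.NewFromConfig(cfg)

	out, err := client.CreateRepository(context.TODO(), &ecr.CreateRepositoryInput{
		RepositoryName:     aws.String("project-a/nginx-web-app"), // placeholder
		ImageTagMutability: types.ImageTagMutabilityImmutable,     // reject tag overwrites
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("created:", aws.ToString(out.Repository.RepositoryUri))
}
```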
ImageTagMutability types.ImageTagMutability @@ -128,6 +127,9 @@ func (c *Client) addOperationCreateRepositoryMiddlewares(stack *middleware.Stack if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateRepository(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteLifecyclePolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteLifecyclePolicy.go index 1f215085..914a512c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteLifecyclePolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteLifecyclePolicy.go @@ -113,6 +113,9 @@ func (c *Client) addOperationDeleteLifecyclePolicyMiddlewares(stack *middleware. if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteLifecyclePolicy(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeletePullThroughCacheRule.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeletePullThroughCacheRule.go index 1f4bd970..413c55a2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeletePullThroughCacheRule.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeletePullThroughCacheRule.go @@ -114,6 +114,9 @@ func (c *Client) addOperationDeletePullThroughCacheRuleMiddlewares(stack *middle if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeletePullThroughCacheRule(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRegistryPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRegistryPolicy.go index a9230f95..dceab652 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRegistryPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRegistryPolicy.go @@ -92,6 +92,9 @@ func (c *Client) addOperationDeleteRegistryPolicyMiddlewares(stack *middleware.S if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteRegistryPolicy(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRepository.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRepository.go index 525834b9..0b2b866c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRepository.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRepository.go @@ -108,6 +108,9 @@ func (c *Client) addOperationDeleteRepositoryMiddlewares(stack *middleware.Stack if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteRepository(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = 
addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRepositoryPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRepositoryPolicy.go index dd23305f..593901b8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRepositoryPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRepositoryPolicy.go @@ -110,6 +110,9 @@ func (c *Client) addOperationDeleteRepositoryPolicyMiddlewares(stack *middleware if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteRepositoryPolicy(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImageReplicationStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImageReplicationStatus.go index 0edd166a..0085efcf 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImageReplicationStatus.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImageReplicationStatus.go @@ -114,6 +114,9 @@ func (c *Client) addOperationDescribeImageReplicationStatusMiddlewares(stack *mi if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeImageReplicationStatus(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImageScanFindings.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImageScanFindings.go index 5908d7ed..f75246f0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImageScanFindings.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImageScanFindings.go @@ -45,11 +45,11 @@ type DescribeImageScanFindingsInput struct { RepositoryName *string // The maximum number of image scan results returned by DescribeImageScanFindings - // in paginated output. When this parameter is used, DescribeImageScanFindings only - // returns maxResults results in a single page along with a nextToken response - // element. The remaining results of the initial request can be seen by sending - // another DescribeImageScanFindings request with the returned nextToken value. - // This value can be between 1 and 1000. If this parameter is not used, then + // in paginated output. When this parameter is used, DescribeImageScanFindings + // only returns maxResults results in a single page along with a nextToken + // response element. The remaining results of the initial request can be seen by + // sending another DescribeImageScanFindings request with the returned nextToken + // value. This value can be between 1 and 1000. If this parameter is not used, then // DescribeImageScanFindings returns up to 100 results and a nextToken value, if // applicable. MaxResults *int32 @@ -81,7 +81,7 @@ type DescribeImageScanFindingsOutput struct { ImageScanStatus *types.ImageScanStatus // The nextToken value to include in a future DescribeImageScanFindings request. 
- // When the results of a DescribeImageScanFindings request exceed maxResults, this + // When the results of a DescribeImageScanFindings request exceed maxResults , this // value can be used to retrieve the next page of results. This value is null when // there are no more results to return. NextToken *string @@ -149,6 +149,9 @@ func (c *Client) addOperationDescribeImageScanFindingsMiddlewares(stack *middlew if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeImageScanFindings(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } @@ -173,11 +176,11 @@ var _ DescribeImageScanFindingsAPIClient = (*Client)(nil) // DescribeImageScanFindings type DescribeImageScanFindingsPaginatorOptions struct { // The maximum number of image scan results returned by DescribeImageScanFindings - // in paginated output. When this parameter is used, DescribeImageScanFindings only - // returns maxResults results in a single page along with a nextToken response - // element. The remaining results of the initial request can be seen by sending - // another DescribeImageScanFindings request with the returned nextToken value. - // This value can be between 1 and 1000. If this parameter is not used, then + // in paginated output. When this parameter is used, DescribeImageScanFindings + // only returns maxResults results in a single page along with a nextToken + // response element. The remaining results of the initial request can be seen by + // sending another DescribeImageScanFindings request with the returned nextToken + // value. This value can be between 1 and 1000. If this parameter is not used, then // DescribeImageScanFindings returns up to 100 results and a nextToken value, if // applicable. Limit int32 @@ -273,9 +276,9 @@ type ImageScanCompleteWaiterOptions struct { // MinDelay must resolve to a value lesser than or equal to the MaxDelay. MinDelay time.Duration - // MaxDelay is the maximum amount of time to delay between retries. If unset or set - // to zero, ImageScanCompleteWaiter will use default max delay of 120 seconds. Note - // that MaxDelay must resolve to value greater than or equal to the MinDelay. + // MaxDelay is the maximum amount of time to delay between retries. If unset or + // set to zero, ImageScanCompleteWaiter will use default max delay of 120 seconds. + // Note that MaxDelay must resolve to value greater than or equal to the MinDelay. MaxDelay time.Duration // LogWaitAttempts is used to enable logging for waiter retry attempts @@ -323,10 +326,10 @@ func (w *ImageScanCompleteWaiter) Wait(ctx context.Context, params *DescribeImag return err } -// WaitForOutput calls the waiter function for ImageScanComplete waiter and returns -// the output of the successful operation. The maxWaitDur is the maximum wait -// duration the waiter will wait. The maxWaitDur is required and must be greater -// than zero. +// WaitForOutput calls the waiter function for ImageScanComplete waiter and +// returns the output of the successful operation. The maxWaitDur is the maximum +// wait duration the waiter will wait. The maxWaitDur is required and must be +// greater than zero. 
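Editor's note: tying the waiter options above together, a hedged sketch of waiting for a scan to finish via `WaitForOutput` with a shortened `MaxDelay`. Repository and tag are placeholders:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ecr"
	"github.com/aws/aws-sdk-go-v2/service/ecr/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := ecr.NewFromConfig(cfg)

	waiter := ecr.NewImageScanCompleteWaiter(client, func(o *ecr.ImageScanCompleteWaiterOptions) {
		o.MaxDelay = 60 * time.Second // default max delay is 120 seconds
	})

	// Poll until the scan completes or the overall deadline expires.
	out, err := waiter.WaitForOutput(context.TODO(), &ecr.DescribeImageScanFindingsInput{
		RepositoryName: aws.String("project-a/nginx-web-app"),                  // placeholder
		ImageId:        &types.ImageIdentifier{ImageTag: aws.String("latest")}, // placeholder
	}, 10*time.Minute)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("scan status:", out.ImageScanStatus.Status)
}
```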
func (w *ImageScanCompleteWaiter) WaitForOutput(ctx context.Context, params *DescribeImageScanFindingsInput, maxWaitDur time.Duration, optFns ...func(*ImageScanCompleteWaiterOptions)) (*DescribeImageScanFindingsOutput, error) { if maxWaitDur <= 0 { return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero") diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImages.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImages.go index f4b65429..dd8e354b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImages.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImages.go @@ -12,11 +12,11 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns metadata about the images in a repository. Beginning with Docker version -// 1.9, the Docker client compresses image layers before pushing them to a V2 -// Docker registry. The output of the docker images command shows the uncompressed -// image size, so it may return a larger image size than the image sizes returned -// by DescribeImages. +// Returns metadata about the images in a repository. Beginning with Docker +// version 1.9, the Docker client compresses image layers before pushing them to a +// V2 Docker registry. The output of the docker images command shows the +// uncompressed image size, so it may return a larger image size than the image +// sizes returned by DescribeImages . func (c *Client) DescribeImages(ctx context.Context, params *DescribeImagesInput, optFns ...func(*Options)) (*DescribeImagesOutput, error) { if params == nil { params = &DescribeImagesInput{} @@ -45,21 +45,21 @@ type DescribeImagesInput struct { // The list of image IDs for the requested repository. ImageIds []types.ImageIdentifier - // The maximum number of repository results returned by DescribeImages in paginated - // output. When this parameter is used, DescribeImages only returns maxResults - // results in a single page along with a nextToken response element. The remaining - // results of the initial request can be seen by sending another DescribeImages - // request with the returned nextToken value. This value can be between 1 and 1000. - // If this parameter is not used, then DescribeImages returns up to 100 results and - // a nextToken value, if applicable. This option cannot be used when you specify - // images with imageIds. + // The maximum number of repository results returned by DescribeImages in + // paginated output. When this parameter is used, DescribeImages only returns + // maxResults results in a single page along with a nextToken response element. + // The remaining results of the initial request can be seen by sending another + // DescribeImages request with the returned nextToken value. This value can be + // between 1 and 1000. If this parameter is not used, then DescribeImages returns + // up to 100 results and a nextToken value, if applicable. This option cannot be + // used when you specify images with imageIds . MaxResults *int32 // The nextToken value returned from a previous paginated DescribeImages request // where maxResults was used and the results exceeded the value of that parameter. // Pagination continues from the end of the previous results that returned the // nextToken value. This value is null when there are no more results to return. - // This option cannot be used when you specify images with imageIds. + // This option cannot be used when you specify images with imageIds . 
NextToken *string // The Amazon Web Services account ID associated with the registry that contains @@ -76,8 +76,8 @@ type DescribeImagesOutput struct { ImageDetails []types.ImageDetail // The nextToken value to include in a future DescribeImages request. When the - // results of a DescribeImages request exceed maxResults, this value can be used to - // retrieve the next page of results. This value is null when there are no more + // results of a DescribeImages request exceed maxResults , this value can be used + // to retrieve the next page of results. This value is null when there are no more // results to return. NextToken *string @@ -138,6 +138,9 @@ func (c *Client) addOperationDescribeImagesMiddlewares(stack *middleware.Stack, if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeImages(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } @@ -160,14 +163,14 @@ var _ DescribeImagesAPIClient = (*Client)(nil) // DescribeImagesPaginatorOptions is the paginator options for DescribeImages type DescribeImagesPaginatorOptions struct { - // The maximum number of repository results returned by DescribeImages in paginated - // output. When this parameter is used, DescribeImages only returns maxResults - // results in a single page along with a nextToken response element. The remaining - // results of the initial request can be seen by sending another DescribeImages - // request with the returned nextToken value. This value can be between 1 and 1000. - // If this parameter is not used, then DescribeImages returns up to 100 results and - // a nextToken value, if applicable. This option cannot be used when you specify - // images with imageIds. + // The maximum number of repository results returned by DescribeImages in + // paginated output. When this parameter is used, DescribeImages only returns + // maxResults results in a single page along with a nextToken response element. + // The remaining results of the initial request can be seen by sending another + // DescribeImages request with the returned nextToken value. This value can be + // between 1 and 1000. If this parameter is not used, then DescribeImages returns + // up to 100 results and a nextToken value, if applicable. This option cannot be + // used when you specify images with imageIds . Limit int32 // Set to true if pagination should stop if the service returns a pagination token diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribePullThroughCacheRules.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribePullThroughCacheRules.go index 2bfefbd7..fc0b4f64 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribePullThroughCacheRules.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribePullThroughCacheRules.go @@ -65,7 +65,7 @@ type DescribePullThroughCacheRulesOutput struct { // The nextToken value to include in a future DescribePullThroughCacheRulesRequest // request. When the results of a DescribePullThroughCacheRulesRequest request - // exceed maxResults, this value can be used to retrieve the next page of results. + // exceed maxResults , this value can be used to retrieve the next page of results. // This value is null when there are no more results to return. 
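Editor's note: rather than handling the maxResults / nextToken bookkeeping described above by hand, callers typically use the generated paginator. A hedged sketch for DescribeImages, with a placeholder repository name:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ecr"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := ecr.NewFromConfig(cfg)

	// The paginator resends the request with the returned nextToken until
	// the service reports no more pages.
	paginator := ecr.NewDescribeImagesPaginator(client, &ecr.DescribeImagesInput{
		RepositoryName: aws.String("project-a/nginx-web-app"), // placeholder
	}, func(o *ecr.DescribeImagesPaginatorOptions) {
		o.Limit = 100 // page size; the service allows 1 to 1000
	})

	for paginator.HasMorePages() {
		page, err := paginator.NextPage(context.TODO())
		if err != nil {
			log.Fatal(err)
		}
		for _, detail := range page.ImageDetails {
			log.Println(aws.ToString(detail.ImageDigest))
		}
	}
}
```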
NextToken *string @@ -126,6 +126,9 @@ func (c *Client) addOperationDescribePullThroughCacheRulesMiddlewares(stack *mid if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribePullThroughCacheRules(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeRegistry.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeRegistry.go index 8d5f4422..79482a79 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeRegistry.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeRegistry.go @@ -95,6 +95,9 @@ func (c *Client) addOperationDescribeRegistryMiddlewares(stack *middleware.Stack if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeRegistry(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeRepositories.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeRepositories.go index cef08268..ccf2a1b1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeRepositories.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeRepositories.go @@ -31,13 +31,14 @@ func (c *Client) DescribeRepositories(ctx context.Context, params *DescribeRepos type DescribeRepositoriesInput struct { // The maximum number of repository results returned by DescribeRepositories in - // paginated output. When this parameter is used, DescribeRepositories only returns - // maxResults results in a single page along with a nextToken response element. The - // remaining results of the initial request can be seen by sending another - // DescribeRepositories request with the returned nextToken value. This value can - // be between 1 and 1000. If this parameter is not used, then DescribeRepositories - // returns up to 100 results and a nextToken value, if applicable. This option - // cannot be used when you specify repositories with repositoryNames. + // paginated output. When this parameter is used, DescribeRepositories only + // returns maxResults results in a single page along with a nextToken response + // element. The remaining results of the initial request can be seen by sending + // another DescribeRepositories request with the returned nextToken value. This + // value can be between 1 and 1000. If this parameter is not used, then + // DescribeRepositories returns up to 100 results and a nextToken value, if + // applicable. This option cannot be used when you specify repositories with + // repositoryNames . MaxResults *int32 // The nextToken value returned from a previous paginated DescribeRepositories @@ -45,7 +46,7 @@ type DescribeRepositoriesInput struct { // parameter. Pagination continues from the end of the previous results that // returned the nextToken value. This value is null when there are no more results // to return. This option cannot be used when you specify repositories with - // repositoryNames. This token should be treated as an opaque identifier that is + // repositoryNames . 
This token should be treated as an opaque identifier that is // only used to retrieve the next items in a list and not for other programmatic // purposes. NextToken *string @@ -65,7 +66,7 @@ type DescribeRepositoriesInput struct { type DescribeRepositoriesOutput struct { // The nextToken value to include in a future DescribeRepositories request. When - // the results of a DescribeRepositories request exceed maxResults, this value can + // the results of a DescribeRepositories request exceed maxResults , this value can // be used to retrieve the next page of results. This value is null when there are // no more results to return. NextToken *string @@ -127,6 +128,9 @@ func (c *Client) addOperationDescribeRepositoriesMiddlewares(stack *middleware.S if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeRepositories(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } @@ -151,13 +155,14 @@ var _ DescribeRepositoriesAPIClient = (*Client)(nil) // DescribeRepositories type DescribeRepositoriesPaginatorOptions struct { // The maximum number of repository results returned by DescribeRepositories in - // paginated output. When this parameter is used, DescribeRepositories only returns - // maxResults results in a single page along with a nextToken response element. The - // remaining results of the initial request can be seen by sending another - // DescribeRepositories request with the returned nextToken value. This value can - // be between 1 and 1000. If this parameter is not used, then DescribeRepositories - // returns up to 100 results and a nextToken value, if applicable. This option - // cannot be used when you specify repositories with repositoryNames. + // paginated output. When this parameter is used, DescribeRepositories only + // returns maxResults results in a single page along with a nextToken response + // element. The remaining results of the initial request can be seen by sending + // another DescribeRepositories request with the returned nextToken value. This + // value can be between 1 and 1000. If this parameter is not used, then + // DescribeRepositories returns up to 100 results and a nextToken value, if + // applicable. This option cannot be used when you specify repositories with + // repositoryNames . Limit int32 // Set to true if pagination should stop if the service returns a pagination token diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetAuthorizationToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetAuthorizationToken.go index 4d6dfbbd..da84860f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetAuthorizationToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetAuthorizationToken.go @@ -17,8 +17,7 @@ import ( // hours. The authorizationToken returned is a base64 encoded string that can be // decoded and used in a docker login command to authenticate to a registry. The // CLI offers an get-login-password command that simplifies the login process. For -// more information, see Registry authentication -// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/Registries.html#registry_auth) +// more information, see Registry authentication (https://docs.aws.amazon.com/AmazonECR/latest/userguide/Registries.html#registry_auth) // in the Amazon Elastic Container Registry User Guide. 
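Editor's note: following the registry authentication doc above, a hedged sketch of turning the returned authorization token into a docker-login username and password. The decoded token has the form "AWS:&lt;password&gt;"; no real values appear below:

```go
package main

import (
	"context"
	"encoding/base64"
	"log"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ecr"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := ecr.NewFromConfig(cfg)

	out, err := client.GetAuthorizationToken(context.TODO(), &ecr.GetAuthorizationTokenInput{})
	if err != nil {
		log.Fatal(err)
	}
	if len(out.AuthorizationData) == 0 {
		log.Fatal("no authorization data returned")
	}

	auth := out.AuthorizationData[0]
	decoded, err := base64.StdEncoding.DecodeString(aws.ToString(auth.AuthorizationToken))
	if err != nil {
		log.Fatal(err)
	}
	// Split the base64-decoded "AWS:<password>" pair for docker login.
	user, password, _ := strings.Cut(string(decoded), ":")
	log.Println("registry:", aws.ToString(auth.ProxyEndpoint),
		"user:", user, "password length:", len(password))
}
```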
func (c *Client) GetAuthorizationToken(ctx context.Context, params *GetAuthorizationTokenInput, optFns ...func(*Options)) (*GetAuthorizationTokenOutput, error) { if params == nil { @@ -110,6 +109,9 @@ func (c *Client) addOperationGetAuthorizationTokenMiddlewares(stack *middleware. if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetAuthorizationToken(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetDownloadUrlForLayer.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetDownloadUrlForLayer.go index 82b584fe..1873ca1a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetDownloadUrlForLayer.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetDownloadUrlForLayer.go @@ -10,11 +10,11 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Retrieves the pre-signed Amazon S3 download URL corresponding to an image layer. -// You can only get URLs for image layers that are referenced in an image. When an -// image is pulled, the GetDownloadUrlForLayer API is called once per image layer -// that is not already cached. This operation is used by the Amazon ECR proxy and -// is not generally used by customers for pulling and pushing images. In most +// Retrieves the pre-signed Amazon S3 download URL corresponding to an image +// layer. You can only get URLs for image layers that are referenced in an image. +// When an image is pulled, the GetDownloadUrlForLayer API is called once per image +// layer that is not already cached. This operation is used by the Amazon ECR proxy +// and is not generally used by customers for pulling and pushing images. In most // cases, you should use the docker CLI to pull, tag, and push images. 
func (c *Client) GetDownloadUrlForLayer(ctx context.Context, params *GetDownloadUrlForLayerInput, optFns ...func(*Options)) (*GetDownloadUrlForLayerOutput, error) { if params == nil { @@ -116,6 +116,9 @@ func (c *Client) addOperationGetDownloadUrlForLayerMiddlewares(stack *middleware if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetDownloadUrlForLayer(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetLifecyclePolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetLifecyclePolicy.go index 76e8a061..43baec00 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetLifecyclePolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetLifecyclePolicy.go @@ -113,6 +113,9 @@ func (c *Client) addOperationGetLifecyclePolicyMiddlewares(stack *middleware.Sta if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetLifecyclePolicy(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetLifecyclePolicyPreview.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetLifecyclePolicyPreview.go index 5114fd1b..1b6c4c7d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetLifecyclePolicyPreview.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetLifecyclePolicyPreview.go @@ -50,13 +50,13 @@ type GetLifecyclePolicyPreviewInput struct { // The maximum number of repository results returned by // GetLifecyclePolicyPreviewRequest in
 paginated output. When this parameter is // used, GetLifecyclePolicyPreviewRequest only returns
 maxResults results in a - // single page along with a nextToken
 response element. The remaining results of + // single page along with a nextToken 
 response element. The remaining results of // the initial request can be seen by sending
 another - // GetLifecyclePolicyPreviewRequest request with the returned nextToken
 value. + // GetLifecyclePolicyPreviewRequest request with the returned nextToken 
 value. // This value can be between 1 and 1000. If this
 parameter is not used, then // GetLifecyclePolicyPreviewRequest returns up to
 100 results and a nextToken // value, if
 applicable. This option cannot be used when you specify images with - // imageIds. + // imageIds . MaxResults *int32 // The nextToken value returned from a previous paginated @@ -64,7 +64,7 @@ type GetLifecyclePolicyPreviewInput struct { // results exceeded the value of that parameter. Pagination continues from the end // of the
 previous results that returned the nextToken value. This value is
 null // when there are no more results to return. This option cannot be used when you - // specify images with imageIds. + // specify images with imageIds . NextToken *string // The Amazon Web Services account ID associated with the registry that contains @@ -81,7 +81,7 @@ type GetLifecyclePolicyPreviewOutput struct { LifecyclePolicyText *string // The nextToken value to include in a future GetLifecyclePolicyPreview request. - // When the results of a GetLifecyclePolicyPreview request exceed maxResults, this + // When the results of a GetLifecyclePolicyPreview request exceed maxResults , this // value can be used to retrieve the next page of results. This value is null when // there are no more results to return. NextToken *string @@ -158,6 +158,9 @@ func (c *Client) addOperationGetLifecyclePolicyPreviewMiddlewares(stack *middlew if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetLifecyclePolicyPreview(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } @@ -184,13 +187,13 @@ type GetLifecyclePolicyPreviewPaginatorOptions struct { // The maximum number of repository results returned by // GetLifecyclePolicyPreviewRequest in
 paginated output. When this parameter is // used, GetLifecyclePolicyPreviewRequest only returns
 maxResults results in a - // single page along with a nextToken
 response element. The remaining results of + // single page along with a nextToken 
 response element. The remaining results of // the initial request can be seen by sending
 another - // GetLifecyclePolicyPreviewRequest request with the returned nextToken
 value. + // GetLifecyclePolicyPreviewRequest request with the returned nextToken 
 value. // This value can be between 1 and 1000. If this
 parameter is not used, then // GetLifecyclePolicyPreviewRequest returns up to
 100 results and a nextToken // value, if
 applicable. This option cannot be used when you specify images with - // imageIds. + // imageIds . Limit int32 // Set to true if pagination should stop if the service returns a pagination token @@ -286,10 +289,10 @@ type LifecyclePolicyPreviewCompleteWaiterOptions struct { // MaxDelay. MinDelay time.Duration - // MaxDelay is the maximum amount of time to delay between retries. If unset or set - // to zero, LifecyclePolicyPreviewCompleteWaiter will use default max delay of 120 - // seconds. Note that MaxDelay must resolve to value greater than or equal to the - // MinDelay. + // MaxDelay is the maximum amount of time to delay between retries. If unset or + // set to zero, LifecyclePolicyPreviewCompleteWaiter will use default max delay of + // 120 seconds. Note that MaxDelay must resolve to value greater than or equal to + // the MinDelay. MaxDelay time.Duration // LogWaitAttempts is used to enable logging for waiter retry attempts diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRegistryPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRegistryPolicy.go index b9086f93..995a4c6d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRegistryPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRegistryPolicy.go @@ -92,6 +92,9 @@ func (c *Client) addOperationGetRegistryPolicyMiddlewares(stack *middleware.Stac if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetRegistryPolicy(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRegistryScanningConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRegistryScanningConfiguration.go index 07f5e4dc..46e3803f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRegistryScanningConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRegistryScanningConfiguration.go @@ -93,6 +93,9 @@ func (c *Client) addOperationGetRegistryScanningConfigurationMiddlewares(stack * if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetRegistryScanningConfiguration(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRepositoryPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRepositoryPolicy.go index 15a292ad..0e87864b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRepositoryPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRepositoryPolicy.go @@ -109,6 +109,9 @@ func (c *Client) addOperationGetRepositoryPolicyMiddlewares(stack *middleware.St if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetRepositoryPolicy(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_InitiateLayerUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_InitiateLayerUpload.go index d700d833..d00c727e 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_InitiateLayerUpload.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_InitiateLayerUpload.go @@ -113,6 +113,9 @@ func (c *Client) addOperationInitiateLayerUploadMiddlewares(stack *middleware.St if err = stack.Initialize.Add(newServiceMetadataMiddleware_opInitiateLayerUpload(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ListImages.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ListImages.go index b3d096d5..f5cb1750 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ListImages.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ListImages.go @@ -14,7 +14,7 @@ import ( // Lists all the image IDs for the specified repository. You can filter images // based on whether or not they are tagged by using the tagStatus filter and -// specifying either TAGGED, UNTAGGED or ANY. For example, you can filter your +// specifying either TAGGED , UNTAGGED or ANY . For example, you can filter your // results to return only UNTAGGED images and then pipe that result to a // BatchDeleteImage operation to delete them. Or, you can filter your results to // return only TAGGED images to list all of the tags in your repository. @@ -74,7 +74,7 @@ type ListImagesOutput struct { ImageIds []types.ImageIdentifier // The nextToken value to include in a future ListImages request. When the results - // of a ListImages request exceed maxResults, this value can be used to retrieve + // of a ListImages request exceed maxResults , this value can be used to retrieve // the next page of results. This value is null when there are no more results to // return. 
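The ListImages documentation above describes filtering on tagStatus and piping the result into BatchDeleteImage; a hedged sketch of that workflow using the generated paginator follows (package, helper name, and client wiring are assumptions).

package ecrexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecr"
	"github.com/aws/aws-sdk-go-v2/service/ecr/types"
)

// deleteUntaggedImages lists UNTAGGED image IDs page by page and feeds each
// page into BatchDeleteImage, mirroring the workflow in the comment above.
func deleteUntaggedImages(ctx context.Context, client *ecr.Client, repo string) error {
	p := ecr.NewListImagesPaginator(client, &ecr.ListImagesInput{
		RepositoryName: aws.String(repo),
		Filter:         &types.ListImagesFilter{TagStatus: types.TagStatusUntagged},
	})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			return err
		}
		if len(page.ImageIds) == 0 {
			continue
		}
		if _, err := client.BatchDeleteImage(ctx, &ecr.BatchDeleteImageInput{
			RepositoryName: aws.String(repo),
			ImageIds:       page.ImageIds,
		}); err != nil {
			return err
		}
	}
	return nil
}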
NextToken *string @@ -136,6 +136,9 @@ func (c *Client) addOperationListImagesMiddlewares(stack *middleware.Stack, opti if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListImages(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ListTagsForResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ListTagsForResource.go index 91a372fe..850acd26 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ListTagsForResource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ListTagsForResource.go @@ -100,6 +100,9 @@ func (c *Client) addOperationListTagsForResourceMiddlewares(stack *middleware.St if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListTagsForResource(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImage.go index 631a9b40..00269b08 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImage.go @@ -11,12 +11,12 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Creates or updates the image manifest and tags associated with an image. When an -// image is pushed and all new image layers have been uploaded, the PutImage API is -// called once to create or update the image manifest and the tags associated with -// the image. This operation is used by the Amazon ECR proxy and is not generally -// used by customers for pulling and pushing images. In most cases, you should use -// the docker CLI to pull, tag, and push images. +// Creates or updates the image manifest and tags associated with an image. When +// an image is pushed and all new image layers have been uploaded, the PutImage API +// is called once to create or update the image manifest and the tags associated +// with the image. This operation is used by the Amazon ECR proxy and is not +// generally used by customers for pulling and pushing images. In most cases, you +// should use the docker CLI to pull, tag, and push images. 
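For the ListTagsForResource operation touched above, a small illustrative helper; the package and function names are assumptions, and the repository ARN is a placeholder supplied by the caller.

package ecrexamples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecr"
)

// printRepositoryTags prints every tag attached to the resource identified
// by resourceArn (an ECR repository ARN).
func printRepositoryTags(ctx context.Context, client *ecr.Client, resourceArn string) error {
	out, err := client.ListTagsForResource(ctx, &ecr.ListTagsForResourceInput{
		ResourceArn: aws.String(resourceArn),
	})
	if err != nil {
		return err
	}
	for _, t := range out.Tags {
		fmt.Printf("%s=%s\n", aws.ToString(t.Key), aws.ToString(t.Value))
	}
	return nil
}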
func (c *Client) PutImage(ctx context.Context, params *PutImageInput, optFns ...func(*Options)) (*PutImageOutput, error) { if params == nil { params = &PutImageInput{} @@ -127,6 +127,9 @@ func (c *Client) addOperationPutImageMiddlewares(stack *middleware.Stack, option if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutImage(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImageScanningConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImageScanningConfiguration.go index 74d8fe45..fbbc75ca 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImageScanningConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImageScanningConfiguration.go @@ -13,7 +13,7 @@ import ( // The PutImageScanningConfiguration API is being deprecated, in favor of // specifying the image scanning configuration at the registry level. For more -// information, see PutRegistryScanningConfiguration. Updates the image scanning +// information, see PutRegistryScanningConfiguration . Updates the image scanning // configuration for the specified repository. func (c *Client) PutImageScanningConfiguration(ctx context.Context, params *PutImageScanningConfigurationInput, optFns ...func(*Options)) (*PutImageScanningConfigurationOutput, error) { if params == nil { @@ -121,6 +121,9 @@ func (c *Client) addOperationPutImageScanningConfigurationMiddlewares(stack *mid if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutImageScanningConfiguration(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImageTagMutability.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImageTagMutability.go index 5e42488a..bd5a2f4f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImageTagMutability.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImageTagMutability.go @@ -11,9 +11,8 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Updates the image tag mutability settings for the specified repository. For more -// information, see Image tag mutability -// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-tag-mutability.html) +// Updates the image tag mutability settings for the specified repository. For +// more information, see Image tag mutability (https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-tag-mutability.html) // in the Amazon Elastic Container Registry User Guide. func (c *Client) PutImageTagMutability(ctx context.Context, params *PutImageTagMutabilityInput, optFns ...func(*Options)) (*PutImageTagMutabilityOutput, error) { if params == nil { @@ -120,6 +119,9 @@ func (c *Client) addOperationPutImageTagMutabilityMiddlewares(stack *middleware. 
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutImageTagMutability(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutLifecyclePolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutLifecyclePolicy.go index 782382ee..8a46dfec 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutLifecyclePolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutLifecyclePolicy.go @@ -11,8 +11,8 @@ import ( ) // Creates or updates the lifecycle policy for the specified repository. For more -// information, see Lifecycle policy template -// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/LifecyclePolicies.html). +// information, see Lifecycle policy template (https://docs.aws.amazon.com/AmazonECR/latest/userguide/LifecyclePolicies.html) +// . func (c *Client) PutLifecyclePolicy(ctx context.Context, params *PutLifecyclePolicyInput, optFns ...func(*Options)) (*PutLifecyclePolicyOutput, error) { if params == nil { params = &PutLifecyclePolicyInput{} @@ -116,6 +116,9 @@ func (c *Client) addOperationPutLifecyclePolicyMiddlewares(stack *middleware.Sta if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutLifecyclePolicy(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutRegistryPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutRegistryPolicy.go index b253e0e0..b02b9353 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutRegistryPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutRegistryPolicy.go @@ -13,8 +13,7 @@ import ( // Creates or updates the permissions policy for your registry. A registry policy // is used to specify permissions for another Amazon Web Services account and is // used when configuring cross-account replication. For more information, see -// Registry permissions -// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/registry-permissions.html) +// Registry permissions (https://docs.aws.amazon.com/AmazonECR/latest/userguide/registry-permissions.html) // in the Amazon Elastic Container Registry User Guide. func (c *Client) PutRegistryPolicy(ctx context.Context, params *PutRegistryPolicyInput, optFns ...func(*Options)) (*PutRegistryPolicyOutput, error) { if params == nil { @@ -33,9 +32,8 @@ func (c *Client) PutRegistryPolicy(ctx context.Context, params *PutRegistryPolic type PutRegistryPolicyInput struct { - // The JSON policy text to apply to your registry. The policy text follows the same - // format as IAM policy text. For more information, see Registry permissions - // (https://docs.aws.amazon.com/AmazonECR/latest/userguide/registry-permissions.html) + // The JSON policy text to apply to your registry. The policy text follows the + // same format as IAM policy text. For more information, see Registry permissions (https://docs.aws.amazon.com/AmazonECR/latest/userguide/registry-permissions.html) // in the Amazon Elastic Container Registry User Guide. // // This member is required. 
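To illustrate the PutLifecyclePolicy call documented above, a minimal sketch that applies a simple expire-untagged rule; the policy JSON follows the lifecycle policy template format linked in the comment, and the package and function names are assumptions.

package ecrexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecr"
)

// expireUntaggedPolicy expires untagged images once more than 10 accumulate.
const expireUntaggedPolicy = `{
  "rules": [
    {
      "rulePriority": 1,
      "description": "expire untagged images beyond 10",
      "selection": {
        "tagStatus": "untagged",
        "countType": "imageCountMoreThan",
        "countNumber": 10
      },
      "action": { "type": "expire" }
    }
  ]
}`

// putLifecyclePolicy applies the policy document to the named repository.
func putLifecyclePolicy(ctx context.Context, client *ecr.Client, repo string) error {
	_, err := client.PutLifecyclePolicy(ctx, &ecr.PutLifecyclePolicyInput{
		RepositoryName:      aws.String(repo),
		LifecyclePolicyText: aws.String(expireUntaggedPolicy),
	})
	return err
}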
@@ -109,6 +107,9 @@ func (c *Client) addOperationPutRegistryPolicyMiddlewares(stack *middleware.Stac if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutRegistryPolicy(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutRegistryScanningConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutRegistryScanningConfiguration.go index fe20cf37..4d0f207c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutRegistryScanningConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutRegistryScanningConfiguration.go @@ -29,13 +29,14 @@ func (c *Client) PutRegistryScanningConfiguration(ctx context.Context, params *P type PutRegistryScanningConfigurationInput struct { - // The scanning rules to use for the registry. A scanning rule is used to determine - // which repository filters are used and at what frequency scanning will occur. + // The scanning rules to use for the registry. A scanning rule is used to + // determine which repository filters are used and at what frequency scanning will + // occur. Rules []types.RegistryScanningRule // The scanning type to set for the registry. When a registry scanning - // configuration is not defined, by default the BASIC scan type is used. When basic - // scanning is used, you may specify filters to determine which individual + // configuration is not defined, by default the BASIC scan type is used. When + // basic scanning is used, you may specify filters to determine which individual // repositories, or all repositories, are scanned when new images are pushed to // those repositories. Alternatively, you can do manual scans of images with basic // scanning. When the ENHANCED scan type is set, Amazon Inspector provides @@ -109,6 +110,9 @@ func (c *Client) addOperationPutRegistryScanningConfigurationMiddlewares(stack * if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutRegistryScanningConfiguration(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutReplicationConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutReplicationConfiguration.go index e8e84df6..5f655241 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutReplicationConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutReplicationConfiguration.go @@ -16,12 +16,11 @@ import ( // DescribeRegistry API action. The first time the PutReplicationConfiguration API // is called, a service-linked IAM role is created in your account for the // replication process. For more information, see Using service-linked roles for -// Amazon ECR -// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/using-service-linked-roles.html) +// Amazon ECR (https://docs.aws.amazon.com/AmazonECR/latest/userguide/using-service-linked-roles.html) // in the Amazon Elastic Container Registry User Guide. When configuring // cross-account replication, the destination account must grant the source account // permission to replicate. This permission is controlled using a registry -// permissions policy. 
For more information, see PutRegistryPolicy. +// permissions policy. For more information, see PutRegistryPolicy . func (c *Client) PutReplicationConfiguration(ctx context.Context, params *PutReplicationConfigurationInput, optFns ...func(*Options)) (*PutReplicationConfigurationOutput, error) { if params == nil { params = &PutReplicationConfigurationInput{} @@ -109,6 +108,9 @@ func (c *Client) addOperationPutReplicationConfigurationMiddlewares(stack *middl if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutReplicationConfiguration(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_SetRepositoryPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_SetRepositoryPolicy.go index 1078970c..8f0eb1b5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_SetRepositoryPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_SetRepositoryPolicy.go @@ -11,8 +11,7 @@ import ( ) // Applies a repository policy to the specified repository to control access -// permissions. For more information, see Amazon ECR Repository policies -// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policies.html) +// permissions. For more information, see Amazon ECR Repository policies (https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policies.html) // in the Amazon Elastic Container Registry User Guide. func (c *Client) SetRepositoryPolicy(ctx context.Context, params *SetRepositoryPolicyInput, optFns ...func(*Options)) (*SetRepositoryPolicyOutput, error) { if params == nil { @@ -32,8 +31,7 @@ func (c *Client) SetRepositoryPolicy(ctx context.Context, params *SetRepositoryP type SetRepositoryPolicyInput struct { // The JSON repository policy text to apply to the repository. For more - // information, see Amazon ECR repository policies - // (https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policy-examples.html) + // information, see Amazon ECR repository policies (https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policy-examples.html) // in the Amazon Elastic Container Registry User Guide. // // This member is required. @@ -44,8 +42,8 @@ type SetRepositoryPolicyInput struct { // This member is required. RepositoryName *string - // If the policy you are attempting to set on a repository policy would prevent you - // from setting another policy in the future, you must force the + // If the policy you are attempting to set on a repository policy would prevent + // you from setting another policy in the future, you must force the // SetRepositoryPolicy operation. This is intended to prevent accidental repository // lock outs. 
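A sketch of the PutReplicationConfiguration usage described above, replicating all repositories in the source registry to a single destination; the destination account ID and region are placeholders and the helper name is an assumption.

package ecrexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecr"
	"github.com/aws/aws-sdk-go-v2/service/ecr/types"
)

// enableCrossRegionReplication configures a single replication rule whose
// destination is destAccount/destRegion (placeholders chosen by the caller).
func enableCrossRegionReplication(ctx context.Context, client *ecr.Client, destAccount, destRegion string) error {
	_, err := client.PutReplicationConfiguration(ctx, &ecr.PutReplicationConfigurationInput{
		ReplicationConfiguration: &types.ReplicationConfiguration{
			Rules: []types.ReplicationRule{{
				Destinations: []types.ReplicationDestination{{
					RegistryId: aws.String(destAccount),
					Region:     aws.String(destRegion),
				}},
			}},
		},
	})
	return err
}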
Force bool @@ -126,6 +124,9 @@ func (c *Client) addOperationSetRepositoryPolicyMiddlewares(stack *middleware.St if err = stack.Initialize.Add(newServiceMetadataMiddleware_opSetRepositoryPolicy(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_StartImageScan.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_StartImageScan.go index 8a9785f4..81b58403 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_StartImageScan.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_StartImageScan.go @@ -13,9 +13,8 @@ import ( // Starts an image vulnerability scan. An image scan can only be started once per // 24 hours on an individual image. This limit includes if an image was scanned on -// initial push. For more information, see Image scanning -// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html) in -// the Amazon Elastic Container Registry User Guide. +// initial push. For more information, see Image scanning (https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html) +// in the Amazon Elastic Container Registry User Guide. func (c *Client) StartImageScan(ctx context.Context, params *StartImageScanInput, optFns ...func(*Options)) (*StartImageScanOutput, error) { if params == nil { params = &StartImageScanInput{} @@ -122,6 +121,9 @@ func (c *Client) addOperationStartImageScanMiddlewares(stack *middleware.Stack, if err = stack.Initialize.Add(newServiceMetadataMiddleware_opStartImageScan(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_StartLifecyclePolicyPreview.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_StartLifecyclePolicyPreview.go index 1169ac44..69db5c08 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_StartLifecyclePolicyPreview.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_StartLifecyclePolicyPreview.go @@ -11,8 +11,8 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Starts a preview of a lifecycle policy for the specified repository. This allows -// you to see the results before associating the lifecycle policy with the +// Starts a preview of a lifecycle policy for the specified repository. This +// allows you to see the results before associating the lifecycle policy with the // repository. 
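Tying the StartLifecyclePolicyPreview operation above to the LifecyclePolicyPreviewCompleteWaiter options shown earlier in this diff, a hedged end-to-end sketch; the delays, names, and client construction are assumptions.

package ecrexamples

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecr"
)

// previewLifecyclePolicy starts a preview of the repository's current
// lifecycle policy, waits for it to complete, then fetches the results.
func previewLifecyclePolicy(ctx context.Context, client *ecr.Client, repo string) (*ecr.GetLifecyclePolicyPreviewOutput, error) {
	if _, err := client.StartLifecyclePolicyPreview(ctx, &ecr.StartLifecyclePolicyPreviewInput{
		RepositoryName: aws.String(repo),
	}); err != nil {
		return nil, err
	}
	waiter := ecr.NewLifecyclePolicyPreviewCompleteWaiter(client,
		func(o *ecr.LifecyclePolicyPreviewCompleteWaiterOptions) {
			o.MinDelay = 5 * time.Second
			o.MaxDelay = 120 * time.Second // matches the documented default
		})
	params := &ecr.GetLifecyclePolicyPreviewInput{RepositoryName: aws.String(repo)}
	if err := waiter.Wait(ctx, params, 5*time.Minute); err != nil {
		return nil, err
	}
	return client.GetLifecyclePolicyPreview(ctx, params)
}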
func (c *Client) StartLifecyclePolicyPreview(ctx context.Context, params *StartLifecyclePolicyPreviewInput, optFns ...func(*Options)) (*StartLifecyclePolicyPreviewOutput, error) { if params == nil { @@ -119,6 +119,9 @@ func (c *Client) addOperationStartLifecyclePolicyPreviewMiddlewares(stack *middl if err = stack.Initialize.Add(newServiceMetadataMiddleware_opStartLifecyclePolicyPreview(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_TagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_TagResource.go index d283acf6..84e860d5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_TagResource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_TagResource.go @@ -104,6 +104,9 @@ func (c *Client) addOperationTagResourceMiddlewares(stack *middleware.Stack, opt if err = stack.Initialize.Add(newServiceMetadataMiddleware_opTagResource(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UntagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UntagResource.go index e42275bd..412384aa 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UntagResource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UntagResource.go @@ -100,6 +100,9 @@ func (c *Client) addOperationUntagResourceMiddlewares(stack *middleware.Stack, o if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUntagResource(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UploadLayerPart.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UploadLayerPart.go index 4acf7e5b..60abba05 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UploadLayerPart.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UploadLayerPart.go @@ -59,9 +59,9 @@ type UploadLayerPartInput struct { // This member is required. UploadId *string - // The Amazon Web Services account ID associated with the registry to which you are - // uploading layer parts. If you do not specify a registry, the default registry is - // assumed. + // The Amazon Web Services account ID associated with the registry to which you + // are uploading layer parts. If you do not specify a registry, the default + // registry is assumed. 
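As a small illustration of the TagResource operation updated above (the helper name and placeholder values are assumptions):

package ecrexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecr"
	"github.com/aws/aws-sdk-go-v2/service/ecr/types"
)

// tagRepository adds (or overwrites) a single tag on the repository
// identified by resourceArn.
func tagRepository(ctx context.Context, client *ecr.Client, resourceArn, key, value string) error {
	_, err := client.TagResource(ctx, &ecr.TagResourceInput{
		ResourceArn: aws.String(resourceArn),
		Tags: []types.Tag{{
			Key:   aws.String(key),
			Value: aws.String(value),
		}},
	})
	return err
}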
RegistryId *string noSmithyDocumentSerde @@ -138,6 +138,9 @@ func (c *Client) addOperationUploadLayerPartMiddlewares(stack *middleware.Stack, if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUploadLayerPart(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/deserializers.go index f9b5512f..79c79b7a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/deserializers.go @@ -87,9 +87,9 @@ func awsAwsjson11_deserializeOpErrorBatchCheckLayerAvailability(response *smithy errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -98,7 +98,7 @@ func awsAwsjson11_deserializeOpErrorBatchCheckLayerAvailability(response *smithy body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -110,8 +110,8 @@ func awsAwsjson11_deserializeOpErrorBatchCheckLayerAvailability(response *smithy } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -204,9 +204,9 @@ func awsAwsjson11_deserializeOpErrorBatchDeleteImage(response *smithyhttp.Respon errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -215,7 +215,7 @@ func awsAwsjson11_deserializeOpErrorBatchDeleteImage(response *smithyhttp.Respon body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -227,8 +227,8 @@ func awsAwsjson11_deserializeOpErrorBatchDeleteImage(response *smithyhttp.Respon } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -321,9 +321,9 @@ func awsAwsjson11_deserializeOpErrorBatchGetImage(response *smithyhttp.Response, errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -332,7 +332,7 @@ 
func awsAwsjson11_deserializeOpErrorBatchGetImage(response *smithyhttp.Response, body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -344,8 +344,8 @@ func awsAwsjson11_deserializeOpErrorBatchGetImage(response *smithyhttp.Response, } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -438,9 +438,9 @@ func awsAwsjson11_deserializeOpErrorBatchGetRepositoryScanningConfiguration(resp errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -449,7 +449,7 @@ func awsAwsjson11_deserializeOpErrorBatchGetRepositoryScanningConfiguration(resp body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -461,8 +461,8 @@ func awsAwsjson11_deserializeOpErrorBatchGetRepositoryScanningConfiguration(resp } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -558,9 +558,9 @@ func awsAwsjson11_deserializeOpErrorCompleteLayerUpload(response *smithyhttp.Res errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -569,7 +569,7 @@ func awsAwsjson11_deserializeOpErrorCompleteLayerUpload(response *smithyhttp.Res body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -581,8 +581,8 @@ func awsAwsjson11_deserializeOpErrorCompleteLayerUpload(response *smithyhttp.Res } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -693,9 +693,9 @@ func awsAwsjson11_deserializeOpErrorCreatePullThroughCacheRule(response *smithyh errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -704,7 +704,7 @@ func 
awsAwsjson11_deserializeOpErrorCreatePullThroughCacheRule(response *smithyh body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -716,8 +716,8 @@ func awsAwsjson11_deserializeOpErrorCreatePullThroughCacheRule(response *smithyh } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -819,9 +819,9 @@ func awsAwsjson11_deserializeOpErrorCreateRepository(response *smithyhttp.Respon errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -830,7 +830,7 @@ func awsAwsjson11_deserializeOpErrorCreateRepository(response *smithyhttp.Respon body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -842,8 +842,8 @@ func awsAwsjson11_deserializeOpErrorCreateRepository(response *smithyhttp.Respon } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -948,9 +948,9 @@ func awsAwsjson11_deserializeOpErrorDeleteLifecyclePolicy(response *smithyhttp.R errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -959,7 +959,7 @@ func awsAwsjson11_deserializeOpErrorDeleteLifecyclePolicy(response *smithyhttp.R body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -971,8 +971,8 @@ func awsAwsjson11_deserializeOpErrorDeleteLifecyclePolicy(response *smithyhttp.R } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -1068,9 +1068,9 @@ func awsAwsjson11_deserializeOpErrorDeletePullThroughCacheRule(response *smithyh errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -1079,7 +1079,7 @@ func 
awsAwsjson11_deserializeOpErrorDeletePullThroughCacheRule(response *smithyh body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -1091,8 +1091,8 @@ func awsAwsjson11_deserializeOpErrorDeletePullThroughCacheRule(response *smithyh } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -1188,9 +1188,9 @@ func awsAwsjson11_deserializeOpErrorDeleteRegistryPolicy(response *smithyhttp.Re errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -1199,7 +1199,7 @@ func awsAwsjson11_deserializeOpErrorDeleteRegistryPolicy(response *smithyhttp.Re body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -1211,8 +1211,8 @@ func awsAwsjson11_deserializeOpErrorDeleteRegistryPolicy(response *smithyhttp.Re } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -1308,9 +1308,9 @@ func awsAwsjson11_deserializeOpErrorDeleteRepository(response *smithyhttp.Respon errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -1319,7 +1319,7 @@ func awsAwsjson11_deserializeOpErrorDeleteRepository(response *smithyhttp.Respon body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -1331,8 +1331,8 @@ func awsAwsjson11_deserializeOpErrorDeleteRepository(response *smithyhttp.Respon } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -1431,9 +1431,9 @@ func awsAwsjson11_deserializeOpErrorDeleteRepositoryPolicy(response *smithyhttp. 
errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -1442,7 +1442,7 @@ func awsAwsjson11_deserializeOpErrorDeleteRepositoryPolicy(response *smithyhttp. body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -1454,8 +1454,8 @@ func awsAwsjson11_deserializeOpErrorDeleteRepositoryPolicy(response *smithyhttp. } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -1551,9 +1551,9 @@ func awsAwsjson11_deserializeOpErrorDescribeImageReplicationStatus(response *smi errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -1562,7 +1562,7 @@ func awsAwsjson11_deserializeOpErrorDescribeImageReplicationStatus(response *smi body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -1574,8 +1574,8 @@ func awsAwsjson11_deserializeOpErrorDescribeImageReplicationStatus(response *smi } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -1674,9 +1674,9 @@ func awsAwsjson11_deserializeOpErrorDescribeImages(response *smithyhttp.Response errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -1685,7 +1685,7 @@ func awsAwsjson11_deserializeOpErrorDescribeImages(response *smithyhttp.Response body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -1697,8 +1697,8 @@ func awsAwsjson11_deserializeOpErrorDescribeImages(response *smithyhttp.Response } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -1794,9 +1794,9 @@ func awsAwsjson11_deserializeOpErrorDescribeImageScanFindings(response *smithyht errorCode := 
"UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -1805,7 +1805,7 @@ func awsAwsjson11_deserializeOpErrorDescribeImageScanFindings(response *smithyht body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -1817,8 +1817,8 @@ func awsAwsjson11_deserializeOpErrorDescribeImageScanFindings(response *smithyht } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -1920,9 +1920,9 @@ func awsAwsjson11_deserializeOpErrorDescribePullThroughCacheRules(response *smit errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -1931,7 +1931,7 @@ func awsAwsjson11_deserializeOpErrorDescribePullThroughCacheRules(response *smit body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -1943,8 +1943,8 @@ func awsAwsjson11_deserializeOpErrorDescribePullThroughCacheRules(response *smit } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -2040,9 +2040,9 @@ func awsAwsjson11_deserializeOpErrorDescribeRegistry(response *smithyhttp.Respon errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -2051,7 +2051,7 @@ func awsAwsjson11_deserializeOpErrorDescribeRegistry(response *smithyhttp.Respon body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -2063,8 +2063,8 @@ func awsAwsjson11_deserializeOpErrorDescribeRegistry(response *smithyhttp.Respon } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -2157,9 +2157,9 @@ func awsAwsjson11_deserializeOpErrorDescribeRepositories(response *smithyhttp.Re errorCode := "UnknownError" 
errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -2168,7 +2168,7 @@ func awsAwsjson11_deserializeOpErrorDescribeRepositories(response *smithyhttp.Re body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -2180,8 +2180,8 @@ func awsAwsjson11_deserializeOpErrorDescribeRepositories(response *smithyhttp.Re } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -2274,9 +2274,9 @@ func awsAwsjson11_deserializeOpErrorGetAuthorizationToken(response *smithyhttp.R errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -2285,7 +2285,7 @@ func awsAwsjson11_deserializeOpErrorGetAuthorizationToken(response *smithyhttp.R body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -2297,8 +2297,8 @@ func awsAwsjson11_deserializeOpErrorGetAuthorizationToken(response *smithyhttp.R } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -2388,9 +2388,9 @@ func awsAwsjson11_deserializeOpErrorGetDownloadUrlForLayer(response *smithyhttp. errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -2399,7 +2399,7 @@ func awsAwsjson11_deserializeOpErrorGetDownloadUrlForLayer(response *smithyhttp. body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -2411,8 +2411,8 @@ func awsAwsjson11_deserializeOpErrorGetDownloadUrlForLayer(response *smithyhttp. 
} errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -2511,9 +2511,9 @@ func awsAwsjson11_deserializeOpErrorGetLifecyclePolicy(response *smithyhttp.Resp errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -2522,7 +2522,7 @@ func awsAwsjson11_deserializeOpErrorGetLifecyclePolicy(response *smithyhttp.Resp body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -2534,8 +2534,8 @@ func awsAwsjson11_deserializeOpErrorGetLifecyclePolicy(response *smithyhttp.Resp } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -2631,9 +2631,9 @@ func awsAwsjson11_deserializeOpErrorGetLifecyclePolicyPreview(response *smithyht errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -2642,7 +2642,7 @@ func awsAwsjson11_deserializeOpErrorGetLifecyclePolicyPreview(response *smithyht body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -2654,8 +2654,8 @@ func awsAwsjson11_deserializeOpErrorGetLifecyclePolicyPreview(response *smithyht } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -2751,9 +2751,9 @@ func awsAwsjson11_deserializeOpErrorGetRegistryPolicy(response *smithyhttp.Respo errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -2762,7 +2762,7 @@ func awsAwsjson11_deserializeOpErrorGetRegistryPolicy(response *smithyhttp.Respo body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -2774,8 +2774,8 @@ func awsAwsjson11_deserializeOpErrorGetRegistryPolicy(response *smithyhttp.Respo } errorBody.Seek(0, 
io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -2871,9 +2871,9 @@ func awsAwsjson11_deserializeOpErrorGetRegistryScanningConfiguration(response *s errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -2882,7 +2882,7 @@ func awsAwsjson11_deserializeOpErrorGetRegistryScanningConfiguration(response *s body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -2894,8 +2894,8 @@ func awsAwsjson11_deserializeOpErrorGetRegistryScanningConfiguration(response *s } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -2988,9 +2988,9 @@ func awsAwsjson11_deserializeOpErrorGetRepositoryPolicy(response *smithyhttp.Res errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -2999,7 +2999,7 @@ func awsAwsjson11_deserializeOpErrorGetRepositoryPolicy(response *smithyhttp.Res body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -3011,8 +3011,8 @@ func awsAwsjson11_deserializeOpErrorGetRepositoryPolicy(response *smithyhttp.Res } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -3108,9 +3108,9 @@ func awsAwsjson11_deserializeOpErrorInitiateLayerUpload(response *smithyhttp.Res errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -3119,7 +3119,7 @@ func awsAwsjson11_deserializeOpErrorInitiateLayerUpload(response *smithyhttp.Res body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -3131,8 +3131,8 @@ func awsAwsjson11_deserializeOpErrorInitiateLayerUpload(response *smithyhttp.Res } errorBody.Seek(0, io.SeekStart) - if 
len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -3228,9 +3228,9 @@ func awsAwsjson11_deserializeOpErrorListImages(response *smithyhttp.Response, me errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -3239,7 +3239,7 @@ func awsAwsjson11_deserializeOpErrorListImages(response *smithyhttp.Response, me body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -3251,8 +3251,8 @@ func awsAwsjson11_deserializeOpErrorListImages(response *smithyhttp.Response, me } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -3345,9 +3345,9 @@ func awsAwsjson11_deserializeOpErrorListTagsForResource(response *smithyhttp.Res errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -3356,7 +3356,7 @@ func awsAwsjson11_deserializeOpErrorListTagsForResource(response *smithyhttp.Res body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -3368,8 +3368,8 @@ func awsAwsjson11_deserializeOpErrorListTagsForResource(response *smithyhttp.Res } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -3462,9 +3462,9 @@ func awsAwsjson11_deserializeOpErrorPutImage(response *smithyhttp.Response, meta errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -3473,7 +3473,7 @@ func awsAwsjson11_deserializeOpErrorPutImage(response *smithyhttp.Response, meta body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -3485,8 +3485,8 @@ func awsAwsjson11_deserializeOpErrorPutImage(response *smithyhttp.Response, meta } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - 
errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -3600,9 +3600,9 @@ func awsAwsjson11_deserializeOpErrorPutImageScanningConfiguration(response *smit errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -3611,7 +3611,7 @@ func awsAwsjson11_deserializeOpErrorPutImageScanningConfiguration(response *smit body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -3623,8 +3623,8 @@ func awsAwsjson11_deserializeOpErrorPutImageScanningConfiguration(response *smit } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -3720,9 +3720,9 @@ func awsAwsjson11_deserializeOpErrorPutImageTagMutability(response *smithyhttp.R errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -3731,7 +3731,7 @@ func awsAwsjson11_deserializeOpErrorPutImageTagMutability(response *smithyhttp.R body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -3743,8 +3743,8 @@ func awsAwsjson11_deserializeOpErrorPutImageTagMutability(response *smithyhttp.R } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -3837,9 +3837,9 @@ func awsAwsjson11_deserializeOpErrorPutLifecyclePolicy(response *smithyhttp.Resp errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -3848,7 +3848,7 @@ func awsAwsjson11_deserializeOpErrorPutLifecyclePolicy(response *smithyhttp.Resp body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -3860,8 +3860,8 @@ func awsAwsjson11_deserializeOpErrorPutLifecyclePolicy(response *smithyhttp.Resp } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = 
restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -3954,9 +3954,9 @@ func awsAwsjson11_deserializeOpErrorPutRegistryPolicy(response *smithyhttp.Respo errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -3965,7 +3965,7 @@ func awsAwsjson11_deserializeOpErrorPutRegistryPolicy(response *smithyhttp.Respo body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -3977,8 +3977,8 @@ func awsAwsjson11_deserializeOpErrorPutRegistryPolicy(response *smithyhttp.Respo } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -4071,9 +4071,9 @@ func awsAwsjson11_deserializeOpErrorPutRegistryScanningConfiguration(response *s errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -4082,7 +4082,7 @@ func awsAwsjson11_deserializeOpErrorPutRegistryScanningConfiguration(response *s body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -4094,8 +4094,8 @@ func awsAwsjson11_deserializeOpErrorPutRegistryScanningConfiguration(response *s } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -4188,9 +4188,9 @@ func awsAwsjson11_deserializeOpErrorPutReplicationConfiguration(response *smithy errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -4199,7 +4199,7 @@ func awsAwsjson11_deserializeOpErrorPutReplicationConfiguration(response *smithy body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -4211,8 +4211,8 @@ func awsAwsjson11_deserializeOpErrorPutReplicationConfiguration(response *smithy } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = 
restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -4305,9 +4305,9 @@ func awsAwsjson11_deserializeOpErrorSetRepositoryPolicy(response *smithyhttp.Res errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -4316,7 +4316,7 @@ func awsAwsjson11_deserializeOpErrorSetRepositoryPolicy(response *smithyhttp.Res body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -4328,8 +4328,8 @@ func awsAwsjson11_deserializeOpErrorSetRepositoryPolicy(response *smithyhttp.Res } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -4422,9 +4422,9 @@ func awsAwsjson11_deserializeOpErrorStartImageScan(response *smithyhttp.Response errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -4433,7 +4433,7 @@ func awsAwsjson11_deserializeOpErrorStartImageScan(response *smithyhttp.Response body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -4445,8 +4445,8 @@ func awsAwsjson11_deserializeOpErrorStartImageScan(response *smithyhttp.Response } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -4551,9 +4551,9 @@ func awsAwsjson11_deserializeOpErrorStartLifecyclePolicyPreview(response *smithy errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -4562,7 +4562,7 @@ func awsAwsjson11_deserializeOpErrorStartLifecyclePolicyPreview(response *smithy body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -4574,8 +4574,8 @@ func awsAwsjson11_deserializeOpErrorStartLifecyclePolicyPreview(response *smithy } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = 
restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -4674,9 +4674,9 @@ func awsAwsjson11_deserializeOpErrorTagResource(response *smithyhttp.Response, m errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -4685,7 +4685,7 @@ func awsAwsjson11_deserializeOpErrorTagResource(response *smithyhttp.Response, m body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -4697,8 +4697,8 @@ func awsAwsjson11_deserializeOpErrorTagResource(response *smithyhttp.Response, m } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -4797,9 +4797,9 @@ func awsAwsjson11_deserializeOpErrorUntagResource(response *smithyhttp.Response, errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -4808,7 +4808,7 @@ func awsAwsjson11_deserializeOpErrorUntagResource(response *smithyhttp.Response, body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -4820,8 +4820,8 @@ func awsAwsjson11_deserializeOpErrorUntagResource(response *smithyhttp.Response, } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -4920,9 +4920,9 @@ func awsAwsjson11_deserializeOpErrorUploadLayerPart(response *smithyhttp.Respons errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -4931,7 +4931,7 @@ func awsAwsjson11_deserializeOpErrorUploadLayerPart(response *smithyhttp.Respons body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -4943,8 +4943,8 @@ func awsAwsjson11_deserializeOpErrorUploadLayerPart(response *smithyhttp.Respons } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = 
restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/doc.go index 602df615..cd150610 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/doc.go @@ -3,14 +3,14 @@ // Package ecr provides the API client, operations, and parameter types for Amazon // EC2 Container Registry. // -// Amazon Elastic Container Registry Amazon Elastic Container Registry (Amazon ECR) -// is a managed container image registry service. Customers can use the familiar -// Docker CLI, or their preferred client, to push, pull, and manage images. Amazon -// ECR provides a secure, scalable, and reliable registry for your Docker or Open -// Container Initiative (OCI) images. Amazon ECR supports private repositories with -// resource-based permissions using IAM so that specific users or Amazon EC2 -// instances can access repositories and images. Amazon ECR has service endpoints -// in each supported Region. For more information, see Amazon ECR endpoints -// (https://docs.aws.amazon.com/general/latest/gr/ecr.html) in the Amazon Web -// Services General Reference. +// Amazon Elastic Container Registry Amazon Elastic Container Registry (Amazon +// ECR) is a managed container image registry service. Customers can use the +// familiar Docker CLI, or their preferred client, to push, pull, and manage +// images. Amazon ECR provides a secure, scalable, and reliable registry for your +// Docker or Open Container Initiative (OCI) images. Amazon ECR supports private +// repositories with resource-based permissions using IAM so that specific users or +// Amazon EC2 instances can access repositories and images. Amazon ECR has service +// endpoints in each supported Region. For more information, see Amazon ECR +// endpoints (https://docs.aws.amazon.com/general/latest/gr/ecr.html) in the Amazon +// Web Services General Reference. 
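
The change repeated across every `awsAwsjson11_deserializeOpError*` hunk above splits the old single `code` variable into `headerCode` (read from the `X-Amzn-ErrorType` response header) and `jsonCode` (read from the JSON error body), and only falls back to the body code when the header is empty. The sketch below is a minimal, hypothetical illustration of that precedence rule; `resolveErrorCode` and the simplified `sanitize` helper are stand-ins written for this note, not part of the AWS SDK.

```go
package main

import (
	"fmt"
	"strings"
)

// sanitize approximates the intent of restjson.SanitizeErrorCode: drop any
// ":" suffix and any "namespace#" prefix from a raw error code. This is an
// illustrative stand-in, not the SDK implementation.
func sanitize(code string) string {
	if idx := strings.Index(code, ":"); idx >= 0 {
		code = code[:idx]
	}
	if idx := strings.LastIndex(code, "#"); idx >= 0 {
		code = code[idx+1:]
	}
	return code
}

// resolveErrorCode applies the precedence encoded in the patched deserializers:
// a non-empty X-Amzn-ErrorType header wins; the JSON body code is used only
// when the header is absent; otherwise the code stays "UnknownError".
func resolveErrorCode(headerCode, jsonCode string) string {
	errorCode := "UnknownError"
	if len(headerCode) != 0 {
		errorCode = sanitize(headerCode)
	}
	if len(headerCode) == 0 && len(jsonCode) != 0 {
		errorCode = sanitize(jsonCode)
	}
	return errorCode
}

func main() {
	fmt.Println(resolveErrorCode("RepositoryNotFoundException", "ValidationException")) // header wins
	fmt.Println(resolveErrorCode("", "com.amazonaws.ecr#LayersNotFoundException"))      // body used only as fallback
	fmt.Println(resolveErrorCode("", ""))                                               // UnknownError
}
```

Under the old logic the body code could silently overwrite a header code, which is the "incorrect error type may have been returned" behaviour noted in the vendored changelog further down; giving the header precedence is the whole point of the repeated two-line change.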
package ecr diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/go_module_metadata.go index 8eb705f4..0e83ac5d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/go_module_metadata.go @@ -3,4 +3,4 @@ package ecr // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.17.18" +const goModuleVersion = "1.18.11" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/internal/endpoints/endpoints.go index e91c8df9..e2a8a780 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/internal/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/internal/endpoints/endpoints.go @@ -89,6 +89,7 @@ var partitionRegexp = struct { AwsCn *regexp.Regexp AwsIso *regexp.Regexp AwsIsoB *regexp.Regexp + AwsIsoE *regexp.Regexp AwsUsGov *regexp.Regexp }{ @@ -96,6 +97,7 @@ var partitionRegexp = struct { AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), + AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), } @@ -183,6 +185,14 @@ var defaultPartitions = endpoints.Partitions{ Region: "ap-south-1", }, }, + endpoints.EndpointKey{ + Region: "ap-south-2", + }: endpoints.Endpoint{ + Hostname: "api.ecr.ap-south-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-south-2", + }, + }, endpoints.EndpointKey{ Region: "ap-southeast-1", }: endpoints.Endpoint{ @@ -207,6 +217,14 @@ var defaultPartitions = endpoints.Partitions{ Region: "ap-southeast-3", }, }, + endpoints.EndpointKey{ + Region: "ap-southeast-4", + }: endpoints.Endpoint{ + Hostname: "api.ecr.ap-southeast-4.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-4", + }, + }, endpoints.EndpointKey{ Region: "ca-central-1", }: endpoints.Endpoint{ @@ -295,6 +313,14 @@ var defaultPartitions = endpoints.Partitions{ Region: "eu-central-1", }, }, + endpoints.EndpointKey{ + Region: "eu-central-2", + }: endpoints.Endpoint{ + Hostname: "api.ecr.eu-central-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-central-2", + }, + }, endpoints.EndpointKey{ Region: "eu-north-1", }: endpoints.Endpoint{ @@ -311,6 +337,14 @@ var defaultPartitions = endpoints.Partitions{ Region: "eu-south-1", }, }, + endpoints.EndpointKey{ + Region: "eu-south-2", + }: endpoints.Endpoint{ + Hostname: "api.ecr.eu-south-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-south-2", + }, + }, endpoints.EndpointKey{ Region: "eu-west-1", }: endpoints.Endpoint{ @@ -624,6 +658,27 @@ var defaultPartitions = endpoints.Partitions{ }, }, }, + { + ID: "aws-iso-e", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "api.ecr-fips.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "api.ecr.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoE, + IsRegionalized: true, + }, { ID: "aws-us-gov", Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/enums.go index 9356aabf..d782c4ec 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/enums.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/enums.go @@ -154,9 +154,10 @@ const ( LifecyclePolicyPreviewStatusFailed LifecyclePolicyPreviewStatus = "FAILED" ) -// Values returns all known values for LifecyclePolicyPreviewStatus. Note that this -// can be expanded in the future, and so it is only as up to date as the client. -// The ordering of this slice is not guaranteed to be stable across updates. +// Values returns all known values for LifecyclePolicyPreviewStatus. Note that +// this can be expanded in the future, and so it is only as up to date as the +// client. The ordering of this slice is not guaranteed to be stable across +// updates. func (LifecyclePolicyPreviewStatus) Values() []LifecyclePolicyPreviewStatus { return []LifecyclePolicyPreviewStatus{ "IN_PROGRESS", @@ -246,9 +247,10 @@ const ( ScanningRepositoryFilterTypeWildcard ScanningRepositoryFilterType = "WILDCARD" ) -// Values returns all known values for ScanningRepositoryFilterType. Note that this -// can be expanded in the future, and so it is only as up to date as the client. -// The ordering of this slice is not guaranteed to be stable across updates. +// Values returns all known values for ScanningRepositoryFilterType. Note that +// this can be expanded in the future, and so it is only as up to date as the +// client. The ordering of this slice is not guaranteed to be stable across +// updates. func (ScanningRepositoryFilterType) Values() []ScanningRepositoryFilterType { return []ScanningRepositoryFilterType{ "WILDCARD", @@ -312,9 +314,9 @@ const ( TagStatusAny TagStatus = "ANY" ) -// Values returns all known values for TagStatus. Note that this can be expanded in -// the future, and so it is only as up to date as the client. The ordering of this -// slice is not guaranteed to be stable across updates. +// Values returns all known values for TagStatus. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. 
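
For context on the endpoints.go hunk above (new regions such as ap-south-2, ap-southeast-4, eu-central-2, eu-south-2, plus the new aws-iso-e partition), the endpoint metadata pairs a region regex with hostname templates per partition. The following is a deliberately simplified, hypothetical sketch of how such metadata resolves a hostname; the real SDK resolver also handles FIPS/dual-stack variants and per-region overrides.

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// partition is a simplified view of the metadata added in the hunk above:
// a region regex plus a hostname template ("{region}" gets substituted).
type partition struct {
	id       string
	regions  *regexp.Regexp
	template string
}

var partitions = []partition{
	{"aws", regexp.MustCompile(`^(us|eu|ap|sa|ca|me|af)\-\w+\-\d+$`), "api.ecr.{region}.amazonaws.com"},
	{"aws-iso-e", regexp.MustCompile(`^eu\-isoe\-\w+\-\d+$`), "api.ecr.{region}.cloud.adc-e.uk"},
}

// resolveHost picks the first partition whose regex matches the region and
// fills in its hostname template.
func resolveHost(region string) (string, bool) {
	for _, p := range partitions {
		if p.regions.MatchString(region) {
			return strings.ReplaceAll(p.template, "{region}", region), true
		}
	}
	return "", false
}

func main() {
	fmt.Println(resolveHost("ap-southeast-4")) // api.ecr.ap-southeast-4.amazonaws.com true
	fmt.Println(resolveHost("eu-isoe-west-1")) // api.ecr.eu-isoe-west-1.cloud.adc-e.uk true
}
```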
func (TagStatus) Values() []TagStatus { return []TagStatus{ "TAGGED", diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/errors.go index 944907ef..4b4782c5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/errors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/errors.go @@ -11,6 +11,8 @@ import ( type EmptyUploadException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -23,7 +25,12 @@ func (e *EmptyUploadException) ErrorMessage() string { } return *e.Message } -func (e *EmptyUploadException) ErrorCode() string { return "EmptyUploadException" } +func (e *EmptyUploadException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "EmptyUploadException" + } + return *e.ErrorCodeOverride +} func (e *EmptyUploadException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // The specified image has already been pushed, and there were no changes to the @@ -31,6 +38,8 @@ func (e *EmptyUploadException) ErrorFault() smithy.ErrorFault { return smithy.Fa type ImageAlreadyExistsException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -43,7 +52,12 @@ func (e *ImageAlreadyExistsException) ErrorMessage() string { } return *e.Message } -func (e *ImageAlreadyExistsException) ErrorCode() string { return "ImageAlreadyExistsException" } +func (e *ImageAlreadyExistsException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ImageAlreadyExistsException" + } + return *e.ErrorCodeOverride +} func (e *ImageAlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // The specified image digest does not match the digest that Amazon ECR calculated @@ -51,6 +65,8 @@ func (e *ImageAlreadyExistsException) ErrorFault() smithy.ErrorFault { return sm type ImageDigestDoesNotMatchException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -64,7 +80,10 @@ func (e *ImageDigestDoesNotMatchException) ErrorMessage() string { return *e.Message } func (e *ImageDigestDoesNotMatchException) ErrorCode() string { - return "ImageDigestDoesNotMatchException" + if e == nil || e.ErrorCodeOverride == nil { + return "ImageDigestDoesNotMatchException" + } + return *e.ErrorCodeOverride } func (e *ImageDigestDoesNotMatchException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } @@ -72,6 +91,8 @@ func (e *ImageDigestDoesNotMatchException) ErrorFault() smithy.ErrorFault { retu type ImageNotFoundException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -84,7 +105,12 @@ func (e *ImageNotFoundException) ErrorMessage() string { } return *e.Message } -func (e *ImageNotFoundException) ErrorCode() string { return "ImageNotFoundException" } +func (e *ImageNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ImageNotFoundException" + } + return *e.ErrorCodeOverride +} func (e *ImageNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // The specified image is tagged with a tag that already exists. The repository is @@ -92,6 +118,8 @@ func (e *ImageNotFoundException) ErrorFault() smithy.ErrorFault { return smithy. 
type ImageTagAlreadyExistsException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -104,7 +132,12 @@ func (e *ImageTagAlreadyExistsException) ErrorMessage() string { } return *e.Message } -func (e *ImageTagAlreadyExistsException) ErrorCode() string { return "ImageTagAlreadyExistsException" } +func (e *ImageTagAlreadyExistsException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ImageTagAlreadyExistsException" + } + return *e.ErrorCodeOverride +} func (e *ImageTagAlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // The layer digest calculation performed by Amazon ECR upon receipt of the image @@ -112,6 +145,8 @@ func (e *ImageTagAlreadyExistsException) ErrorFault() smithy.ErrorFault { return type InvalidLayerException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -124,14 +159,21 @@ func (e *InvalidLayerException) ErrorMessage() string { } return *e.Message } -func (e *InvalidLayerException) ErrorCode() string { return "InvalidLayerException" } +func (e *InvalidLayerException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidLayerException" + } + return *e.ErrorCodeOverride +} func (e *InvalidLayerException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// The layer part size is not valid, or the first byte specified is not consecutive -// to the last byte of a previous layer part upload. +// The layer part size is not valid, or the first byte specified is not +// consecutive to the last byte of a previous layer part upload. type InvalidLayerPartException struct { Message *string + ErrorCodeOverride *string + RegistryId *string RepositoryName *string UploadId *string @@ -149,7 +191,12 @@ func (e *InvalidLayerPartException) ErrorMessage() string { } return *e.Message } -func (e *InvalidLayerPartException) ErrorCode() string { return "InvalidLayerPartException" } +func (e *InvalidLayerPartException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidLayerPartException" + } + return *e.ErrorCodeOverride +} func (e *InvalidLayerPartException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // The specified parameter is invalid. Review the available parameters for the API @@ -157,6 +204,8 @@ func (e *InvalidLayerPartException) ErrorFault() smithy.ErrorFault { return smit type InvalidParameterException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -169,7 +218,12 @@ func (e *InvalidParameterException) ErrorMessage() string { } return *e.Message } -func (e *InvalidParameterException) ErrorCode() string { return "InvalidParameterException" } +func (e *InvalidParameterException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidParameterException" + } + return *e.ErrorCodeOverride +} func (e *InvalidParameterException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // An invalid parameter has been specified. 
Tag keys can have a maximum character @@ -178,6 +232,8 @@ func (e *InvalidParameterException) ErrorFault() smithy.ErrorFault { return smit type InvalidTagParameterException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -190,13 +246,20 @@ func (e *InvalidTagParameterException) ErrorMessage() string { } return *e.Message } -func (e *InvalidTagParameterException) ErrorCode() string { return "InvalidTagParameterException" } +func (e *InvalidTagParameterException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidTagParameterException" + } + return *e.ErrorCodeOverride +} func (e *InvalidTagParameterException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // The operation failed due to a KMS exception. type KmsException struct { Message *string + ErrorCodeOverride *string + KmsError *string noSmithyDocumentSerde @@ -211,13 +274,20 @@ func (e *KmsException) ErrorMessage() string { } return *e.Message } -func (e *KmsException) ErrorCode() string { return "KmsException" } +func (e *KmsException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "KmsException" + } + return *e.ErrorCodeOverride +} func (e *KmsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // The image layer already exists in the associated repository. type LayerAlreadyExistsException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -230,14 +300,21 @@ func (e *LayerAlreadyExistsException) ErrorMessage() string { } return *e.Message } -func (e *LayerAlreadyExistsException) ErrorCode() string { return "LayerAlreadyExistsException" } +func (e *LayerAlreadyExistsException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "LayerAlreadyExistsException" + } + return *e.ErrorCodeOverride +} func (e *LayerAlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// The specified layer is not available because it is not associated with an image. -// Unassociated image layers may be cleaned up at any time. +// The specified layer is not available because it is not associated with an +// image. Unassociated image layers may be cleaned up at any time. type LayerInaccessibleException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -250,13 +327,20 @@ func (e *LayerInaccessibleException) ErrorMessage() string { } return *e.Message } -func (e *LayerInaccessibleException) ErrorCode() string { return "LayerInaccessibleException" } +func (e *LayerInaccessibleException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "LayerInaccessibleException" + } + return *e.ErrorCodeOverride +} func (e *LayerInaccessibleException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // Layer parts must be at least 5 MiB in size. 
type LayerPartTooSmallException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -269,14 +353,21 @@ func (e *LayerPartTooSmallException) ErrorMessage() string { } return *e.Message } -func (e *LayerPartTooSmallException) ErrorCode() string { return "LayerPartTooSmallException" } +func (e *LayerPartTooSmallException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "LayerPartTooSmallException" + } + return *e.ErrorCodeOverride +} func (e *LayerPartTooSmallException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// The specified layers could not be found, or the specified layer is not valid for -// this repository. +// The specified layers could not be found, or the specified layer is not valid +// for this repository. type LayersNotFoundException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -289,13 +380,20 @@ func (e *LayersNotFoundException) ErrorMessage() string { } return *e.Message } -func (e *LayersNotFoundException) ErrorCode() string { return "LayersNotFoundException" } +func (e *LayersNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "LayersNotFoundException" + } + return *e.ErrorCodeOverride +} func (e *LayersNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // The lifecycle policy could not be found, and no policy is set to the repository. type LifecyclePolicyNotFoundException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -309,7 +407,10 @@ func (e *LifecyclePolicyNotFoundException) ErrorMessage() string { return *e.Message } func (e *LifecyclePolicyNotFoundException) ErrorCode() string { - return "LifecyclePolicyNotFoundException" + if e == nil || e.ErrorCodeOverride == nil { + return "LifecyclePolicyNotFoundException" + } + return *e.ErrorCodeOverride } func (e *LifecyclePolicyNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } @@ -318,6 +419,8 @@ func (e *LifecyclePolicyNotFoundException) ErrorFault() smithy.ErrorFault { retu type LifecyclePolicyPreviewInProgressException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -331,7 +434,10 @@ func (e *LifecyclePolicyPreviewInProgressException) ErrorMessage() string { return *e.Message } func (e *LifecyclePolicyPreviewInProgressException) ErrorCode() string { - return "LifecyclePolicyPreviewInProgressException" + if e == nil || e.ErrorCodeOverride == nil { + return "LifecyclePolicyPreviewInProgressException" + } + return *e.ErrorCodeOverride } func (e *LifecyclePolicyPreviewInProgressException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient @@ -341,6 +447,8 @@ func (e *LifecyclePolicyPreviewInProgressException) ErrorFault() smithy.ErrorFau type LifecyclePolicyPreviewNotFoundException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -354,19 +462,23 @@ func (e *LifecyclePolicyPreviewNotFoundException) ErrorMessage() string { return *e.Message } func (e *LifecyclePolicyPreviewNotFoundException) ErrorCode() string { - return "LifecyclePolicyPreviewNotFoundException" + if e == nil || e.ErrorCodeOverride == nil { + return "LifecyclePolicyPreviewNotFoundException" + } + return *e.ErrorCodeOverride } func (e *LifecyclePolicyPreviewNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// The operation did not succeed because it would have exceeded a service limit for -// your account. 
For more information, see Amazon ECR service quotas -// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html) in -// the Amazon Elastic Container Registry User Guide. +// The operation did not succeed because it would have exceeded a service limit +// for your account. For more information, see Amazon ECR service quotas (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html) +// in the Amazon Elastic Container Registry User Guide. type LimitExceededException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -379,7 +491,12 @@ func (e *LimitExceededException) ErrorMessage() string { } return *e.Message } -func (e *LimitExceededException) ErrorCode() string { return "LimitExceededException" } +func (e *LimitExceededException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "LimitExceededException" + } + return *e.ErrorCodeOverride +} func (e *LimitExceededException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // A pull through cache rule with these settings already exists for the private @@ -387,6 +504,8 @@ func (e *LimitExceededException) ErrorFault() smithy.ErrorFault { return smithy. type PullThroughCacheRuleAlreadyExistsException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -400,7 +519,10 @@ func (e *PullThroughCacheRuleAlreadyExistsException) ErrorMessage() string { return *e.Message } func (e *PullThroughCacheRuleAlreadyExistsException) ErrorCode() string { - return "PullThroughCacheRuleAlreadyExistsException" + if e == nil || e.ErrorCodeOverride == nil { + return "PullThroughCacheRuleAlreadyExistsException" + } + return *e.ErrorCodeOverride } func (e *PullThroughCacheRuleAlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient @@ -411,6 +533,8 @@ func (e *PullThroughCacheRuleAlreadyExistsException) ErrorFault() smithy.ErrorFa type PullThroughCacheRuleNotFoundException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -424,7 +548,10 @@ func (e *PullThroughCacheRuleNotFoundException) ErrorMessage() string { return *e.Message } func (e *PullThroughCacheRuleNotFoundException) ErrorCode() string { - return "PullThroughCacheRuleNotFoundException" + if e == nil || e.ErrorCodeOverride == nil { + return "PullThroughCacheRuleNotFoundException" + } + return *e.ErrorCodeOverride } func (e *PullThroughCacheRuleNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient @@ -434,6 +561,8 @@ func (e *PullThroughCacheRuleNotFoundException) ErrorFault() smithy.ErrorFault { type ReferencedImagesNotFoundException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -447,7 +576,10 @@ func (e *ReferencedImagesNotFoundException) ErrorMessage() string { return *e.Message } func (e *ReferencedImagesNotFoundException) ErrorCode() string { - return "ReferencedImagesNotFoundException" + if e == nil || e.ErrorCodeOverride == nil { + return "ReferencedImagesNotFoundException" + } + return *e.ErrorCodeOverride } func (e *ReferencedImagesNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } @@ -455,6 +587,8 @@ func (e *ReferencedImagesNotFoundException) ErrorFault() smithy.ErrorFault { ret type RegistryPolicyNotFoundException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -468,7 +602,10 @@ func (e *RegistryPolicyNotFoundException) ErrorMessage() string { return *e.Message } func (e 
*RegistryPolicyNotFoundException) ErrorCode() string { - return "RegistryPolicyNotFoundException" + if e == nil || e.ErrorCodeOverride == nil { + return "RegistryPolicyNotFoundException" + } + return *e.ErrorCodeOverride } func (e *RegistryPolicyNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } @@ -476,6 +613,8 @@ func (e *RegistryPolicyNotFoundException) ErrorFault() smithy.ErrorFault { retur type RepositoryAlreadyExistsException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -489,7 +628,10 @@ func (e *RepositoryAlreadyExistsException) ErrorMessage() string { return *e.Message } func (e *RepositoryAlreadyExistsException) ErrorCode() string { - return "RepositoryAlreadyExistsException" + if e == nil || e.ErrorCodeOverride == nil { + return "RepositoryAlreadyExistsException" + } + return *e.ErrorCodeOverride } func (e *RepositoryAlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } @@ -498,6 +640,8 @@ func (e *RepositoryAlreadyExistsException) ErrorFault() smithy.ErrorFault { retu type RepositoryNotEmptyException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -510,15 +654,22 @@ func (e *RepositoryNotEmptyException) ErrorMessage() string { } return *e.Message } -func (e *RepositoryNotEmptyException) ErrorCode() string { return "RepositoryNotEmptyException" } +func (e *RepositoryNotEmptyException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "RepositoryNotEmptyException" + } + return *e.ErrorCodeOverride +} func (e *RepositoryNotEmptyException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// The specified repository could not be found. Check the spelling of the specified -// repository and ensure that you are performing operations on the correct -// registry. +// The specified repository could not be found. Check the spelling of the +// specified repository and ensure that you are performing operations on the +// correct registry. 
type RepositoryNotFoundException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -531,7 +682,12 @@ func (e *RepositoryNotFoundException) ErrorMessage() string { } return *e.Message } -func (e *RepositoryNotFoundException) ErrorCode() string { return "RepositoryNotFoundException" } +func (e *RepositoryNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "RepositoryNotFoundException" + } + return *e.ErrorCodeOverride +} func (e *RepositoryNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // The specified repository and registry combination does not have an associated @@ -539,6 +695,8 @@ func (e *RepositoryNotFoundException) ErrorFault() smithy.ErrorFault { return sm type RepositoryPolicyNotFoundException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -552,7 +710,10 @@ func (e *RepositoryPolicyNotFoundException) ErrorMessage() string { return *e.Message } func (e *RepositoryPolicyNotFoundException) ErrorCode() string { - return "RepositoryPolicyNotFoundException" + if e == nil || e.ErrorCodeOverride == nil { + return "RepositoryPolicyNotFoundException" + } + return *e.ErrorCodeOverride } func (e *RepositoryPolicyNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } @@ -561,6 +722,8 @@ func (e *RepositoryPolicyNotFoundException) ErrorFault() smithy.ErrorFault { ret type ScanNotFoundException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -573,13 +736,20 @@ func (e *ScanNotFoundException) ErrorMessage() string { } return *e.Message } -func (e *ScanNotFoundException) ErrorCode() string { return "ScanNotFoundException" } +func (e *ScanNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ScanNotFoundException" + } + return *e.ErrorCodeOverride +} func (e *ScanNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // These errors are usually caused by a server-side issue. type ServerException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -592,14 +762,21 @@ func (e *ServerException) ErrorMessage() string { } return *e.Message } -func (e *ServerException) ErrorCode() string { return "ServerException" } +func (e *ServerException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ServerException" + } + return *e.ErrorCodeOverride +} func (e *ServerException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer } -// The list of tags on the repository is over the limit. The maximum number of tags -// that can be applied to a repository is 50. +// The list of tags on the repository is over the limit. The maximum number of +// tags that can be applied to a repository is 50. type TooManyTagsException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -612,13 +789,20 @@ func (e *TooManyTagsException) ErrorMessage() string { } return *e.Message } -func (e *TooManyTagsException) ErrorCode() string { return "TooManyTagsException" } +func (e *TooManyTagsException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "TooManyTagsException" + } + return *e.ErrorCodeOverride +} func (e *TooManyTagsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // The image is of a type that cannot be scanned. 
type UnsupportedImageTypeException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -631,13 +815,20 @@ func (e *UnsupportedImageTypeException) ErrorMessage() string { } return *e.Message } -func (e *UnsupportedImageTypeException) ErrorCode() string { return "UnsupportedImageTypeException" } +func (e *UnsupportedImageTypeException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "UnsupportedImageTypeException" + } + return *e.ErrorCodeOverride +} func (e *UnsupportedImageTypeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // The specified upstream registry isn't supported. type UnsupportedUpstreamRegistryException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -651,7 +842,10 @@ func (e *UnsupportedUpstreamRegistryException) ErrorMessage() string { return *e.Message } func (e *UnsupportedUpstreamRegistryException) ErrorCode() string { - return "UnsupportedUpstreamRegistryException" + if e == nil || e.ErrorCodeOverride == nil { + return "UnsupportedUpstreamRegistryException" + } + return *e.ErrorCodeOverride } func (e *UnsupportedUpstreamRegistryException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient @@ -662,6 +856,8 @@ func (e *UnsupportedUpstreamRegistryException) ErrorFault() smithy.ErrorFault { type UploadNotFoundException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -674,13 +870,20 @@ func (e *UploadNotFoundException) ErrorMessage() string { } return *e.Message } -func (e *UploadNotFoundException) ErrorCode() string { return "UploadNotFoundException" } +func (e *UploadNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "UploadNotFoundException" + } + return *e.ErrorCodeOverride +} func (e *UploadNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // There was an exception validating this request. type ValidationException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -693,5 +896,10 @@ func (e *ValidationException) ErrorMessage() string { } return *e.Message } -func (e *ValidationException) ErrorCode() string { return "ValidationException" } +func (e *ValidationException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ValidationException" + } + return *e.ErrorCodeOverride +} func (e *ValidationException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/types.go index 22afaab8..42509094 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/types.go @@ -26,7 +26,7 @@ type AuthorizationData struct { // A base64-encoded string that contains authorization data for the specified // Amazon ECR registry. When the string is decoded, it is presented in the format - // user:password for private registry authentication using docker login. + // user:password for private registry authentication using docker login . AuthorizationToken *string // The Unix time in seconds and milliseconds when the authorization token expires. @@ -35,8 +35,8 @@ type AuthorizationData struct { // The registry URL to use for this authorization token in a docker login command. // The Amazon ECR registry URL format is - // https://aws_account_id.dkr.ecr.region.amazonaws.com. 
For example, - // https://012345678910.dkr.ecr.us-east-1.amazonaws.com.. + // https://aws_account_id.dkr.ecr.region.amazonaws.com . For example, + // https://012345678910.dkr.ecr.us-east-1.amazonaws.com .. ProxyEndpoint *string noSmithyDocumentSerde @@ -128,7 +128,7 @@ type CvssScoreDetails struct { type DescribeImagesFilter struct { // The tag status with which to filter your DescribeImages results. You can filter - // results based on whether they are TAGGED or UNTAGGED. + // results based on whether they are TAGGED or UNTAGGED . TagStatus TagStatus noSmithyDocumentSerde @@ -142,8 +142,7 @@ type DescribeImagesFilter struct { // require any action on your part. For more control over the encryption of the // contents of your repository, you can use server-side encryption with Key // Management Service key stored in Key Management Service (KMS) to encrypt your -// images. For more information, see Amazon ECR encryption at rest -// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/encryption-at-rest.html) +// images. For more information, see Amazon ECR encryption at rest (https://docs.aws.amazon.com/AmazonECR/latest/userguide/encryption-at-rest.html) // in the Amazon Elastic Container Registry User Guide. type EncryptionConfiguration struct { @@ -153,14 +152,12 @@ type EncryptionConfiguration struct { // can either use the default Amazon Web Services managed KMS key for Amazon ECR, // or specify your own KMS key, which you already created. For more information, // see Protecting data using server-side encryption with an KMS key stored in Key - // Management Service (SSE-KMS) - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) in the - // Amazon Simple Storage Service Console Developer Guide. If you use the AES256 - // encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed - // encryption keys which encrypts the images in the repository using an AES-256 - // encryption algorithm. For more information, see Protecting data using - // server-side encryption with Amazon S3-managed encryption keys (SSE-S3) - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) + // Management Service (SSE-KMS) (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) + // in the Amazon Simple Storage Service Console Developer Guide. If you use the + // AES256 encryption type, Amazon ECR uses server-side encryption with Amazon + // S3-managed encryption keys which encrypts the images in the repository using an + // AES-256 encryption algorithm. For more information, see Protecting data using + // server-side encryption with Amazon S3-managed encryption keys (SSE-S3) (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) // in the Amazon Simple Storage Service Console Developer Guide. // // This member is required. @@ -276,19 +273,19 @@ type ImageDetail struct { // Docker version 1.9, the Docker client compresses image layers before pushing // them to a V2 Docker registry. The output of the docker images command shows the // uncompressed image size, so it may return a larger image size than the image - // sizes returned by DescribeImages. + // sizes returned by DescribeImages . ImageSizeInBytes *int64 // The list of tags associated with this image. ImageTags []string - // The date and time, expressed in standard JavaScript date format, when Amazon ECR - // recorded the last image pull. Amazon ECR refreshes the last image pull timestamp - // at least once every 24 hours. 
For example, if you pull an image once a day then - // the lastRecordedPullTime timestamp will indicate the exact time that the image - // was last pulled. However, if you pull an image once an hour, because Amazon ECR - // refreshes the lastRecordedPullTime timestamp at least once every 24 hours, the - // result may not be the exact time that the image was last pulled. + // The date and time, expressed in standard JavaScript date format, when Amazon + // ECR recorded the last image pull. Amazon ECR refreshes the last image pull + // timestamp at least once every 24 hours. For example, if you pull an image once a + // day then the lastRecordedPullTime timestamp will indicate the exact time that + // the image was last pulled. However, if you pull an image once an hour, because + // Amazon ECR refreshes the lastRecordedPullTime timestamp at least once every 24 + // hours, the result may not be the exact time that the image was last pulled. LastRecordedPullTime *time.Time // The Amazon Web Services account ID associated with the registry to which this @@ -408,10 +405,9 @@ type ImageScanFindingsSummary struct { type ImageScanningConfiguration struct { // The setting that determines whether images are scanned after being pushed to a - // repository. If set to true, images will be scanned after being pushed. If this + // repository. If set to true , images will be scanned after being pushed. If this // parameter is not specified, it will default to false and images will not be - // scanned unless a scan is manually started with the API_StartImageScan - // (https://docs.aws.amazon.com/AmazonECR/latest/APIReference/API_StartImageScan.html) + // scanned unless a scan is manually started with the API_StartImageScan (https://docs.aws.amazon.com/AmazonECR/latest/APIReference/API_StartImageScan.html) // API. ScanOnPush bool @@ -444,7 +440,7 @@ type Layer struct { // The media type of the layer, such as // application/vnd.docker.image.rootfs.diff.tar.gzip or - // application/vnd.oci.image.layer.v1.tar+gzip. + // application/vnd.oci.image.layer.v1.tar+gzip . MediaType *string noSmithyDocumentSerde @@ -518,7 +514,7 @@ type LifecyclePolicyRuleAction struct { type ListImagesFilter struct { // The tag status with which to filter your ListImages results. You can filter - // results based on whether they are TAGGED or UNTAGGED. + // results based on whether they are TAGGED or UNTAGGED . TagStatus TagStatus noSmithyDocumentSerde @@ -570,8 +566,8 @@ type PullThroughCacheRule struct { // The Amazon ECR repository prefix associated with the pull through cache rule. EcrRepositoryPrefix *string - // The Amazon Web Services account ID associated with the registry the pull through - // cache rule is associated with. + // The Amazon Web Services account ID associated with the registry the pull + // through cache rule is associated with. RegistryId *string // The upstream registry URL associated with the pull through cache rule. @@ -615,7 +611,7 @@ type RegistryScanningRule struct { // The frequency that scans are performed at for a private registry. When the // ENHANCED scan type is specified, the supported scan frequencies are - // CONTINUOUS_SCAN and SCAN_ON_PUSH. When the BASIC scan type is specified, the + // CONTINUOUS_SCAN and SCAN_ON_PUSH . When the BASIC scan type is specified, the // SCAN_ON_PUSH and MANUAL scan frequencies are supported. // // This member is required. @@ -704,7 +700,7 @@ type Repository struct { // The Amazon Resource Name (ARN) that identifies the repository. 
The ARN contains // the arn:aws:ecr namespace, followed by the region of the repository, Amazon Web // Services account ID of the repository owner, repository namespace, and - // repository name. For example, arn:aws:ecr:region:012345678910:repository/test. + // repository name. For example, arn:aws:ecr:region:012345678910:repository/test . RepositoryArn *string // The name of the repository. @@ -730,8 +726,8 @@ type RepositoryFilter struct { // This member is required. Filter *string - // The repository filter type. The only supported value is PREFIX_MATCH, which is a - // repository name prefix specified with the filter parameter. + // The repository filter type. The only supported value is PREFIX_MATCH , which is + // a repository name prefix specified with the filter parameter. // // This member is required. FilterType RepositoryFilterType @@ -797,16 +793,15 @@ type Resource struct { // Contains details about the resource involved in the finding. type ResourceDetails struct { - // An object that contains details about the Amazon ECR container image involved in - // the finding. + // An object that contains details about the Amazon ECR container image involved + // in the finding. AwsEcrContainerImage *AwsEcrContainerImageDetails noSmithyDocumentSerde } // The details of a scanning repository filter. For more information on how to use -// filters, see Using filters -// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html#image-scanning-filters) +// filters, see Using filters (https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html#image-scanning-filters) // in the Amazon Elastic Container Registry User Guide. type ScanningRepositoryFilter struct { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/CHANGELOG.md index e6ed4a07..fb5b96f9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/CHANGELOG.md @@ -1,3 +1,76 @@ +# v1.16.2 (2023-05-04) + +* No change notes available for this release. + +# v1.16.1 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.0 (2023-04-11) + +* **Feature**: This release will allow using registry alias as registryId in BatchDeleteImage request. + +# v1.15.8 (2023-04-10) + +* No change notes available for this release. + +# v1.15.7 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.6 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.5 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.4 (2023-02-22) + +* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. + +# v1.15.3 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.2 (2023-02-15) + +* **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. +* **Bug Fix**: Correct error type parsing for restJson services. + +# v1.15.1 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.0 (2023-01-09) + +* **Feature**: This release for Amazon ECR Public makes several change to bring the SDK into sync with the API. 
+ +# v1.14.0 (2023-01-05) + +* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). + +# v1.13.22 (2022-12-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.21 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.20 (2022-11-30) + +* No change notes available for this release. + +# v1.13.19 (2022-10-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.18 (2022-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.13.17 (2022-09-20) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_client.go index dfd012e2..f6d77354 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_client.go @@ -115,7 +115,7 @@ type Options struct { Retryer aws.Retryer // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set - // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig. You + // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You // should not populate this structure programmatically, or rely on the values here // within your applications. RuntimeEnvironment aws.RuntimeEnvironment diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_BatchCheckLayerAvailability.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_BatchCheckLayerAvailability.go index 682599a0..0962a06e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_BatchCheckLayerAvailability.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_BatchCheckLayerAvailability.go @@ -11,12 +11,12 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Checks the availability of one or more image layers within a repository in a -// public registry. When an image is pushed to a repository, each image layer is -// checked to verify if it has been uploaded before. If it has been uploaded, then -// the image layer is skipped. This operation is used by the Amazon ECR proxy and -// is not generally used by customers for pulling and pushing images. In most -// cases, you should use the docker CLI to pull, tag, and push images. +// Checks the availability of one or more image layers that are within a +// repository in a public registry. When an image is pushed to a repository, each +// image layer is checked to verify if it has been uploaded before. If it has been +// uploaded, then the image layer is skipped. This operation is used by the Amazon +// ECR proxy and is not generally used by customers for pulling and pushing images. +// In most cases, you should use the docker CLI to pull, tag, and push images. func (c *Client) BatchCheckLayerAvailability(ctx context.Context, params *BatchCheckLayerAvailabilityInput, optFns ...func(*Options)) (*BatchCheckLayerAvailabilityOutput, error) { if params == nil { params = &BatchCheckLayerAvailabilityInput{} @@ -39,14 +39,14 @@ type BatchCheckLayerAvailabilityInput struct { // This member is required. LayerDigests []string - // The name of the repository that is associated with the image layers to check. + // The name of the repository that's associated with the image layers to check. // // This member is required. 
RepositoryName *string - // The AWS account ID associated with the public registry that contains the image - // layers to check. If you do not specify a registry, the default public registry - // is assumed. + // The Amazon Web Services account ID, or registry alias, associated with the + // public registry that contains the image layers to check. If you do not specify a + // registry, the default public registry is assumed. RegistryId *string noSmithyDocumentSerde @@ -57,8 +57,8 @@ type BatchCheckLayerAvailabilityOutput struct { // Any failures associated with the call. Failures []types.LayerFailure - // A list of image layer objects corresponding to the image layer references in the - // request. + // A list of image layer objects that correspond to the image layer references in + // the request. Layers []types.Layer // Metadata pertaining to the operation's result. @@ -118,6 +118,9 @@ func (c *Client) addOperationBatchCheckLayerAvailabilityMiddlewares(stack *middl if err = stack.Initialize.Add(newServiceMetadataMiddleware_opBatchCheckLayerAvailability(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_BatchDeleteImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_BatchDeleteImage.go index b208c952..8ea3aa36 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_BatchDeleteImage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_BatchDeleteImage.go @@ -11,12 +11,12 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes a list of specified images within a repository in a public registry. -// Images are specified with either an imageTag or imageDigest. You can remove a -// tag from an image by specifying the image's tag in your request. When you remove -// the last tag from an image, the image is deleted from your repository. You can -// completely delete an image (and all of its tags) by specifying the image's -// digest in your request. +// Deletes a list of specified images that are within a repository in a public +// registry. Images are specified with either an imageTag or imageDigest . You can +// remove a tag from an image by specifying the image's tag in your request. When +// you remove the last tag from an image, the image is deleted from your +// repository. You can completely delete an image (and all of its tags) by +// specifying the digest of the image in your request. func (c *Client) BatchDeleteImage(ctx context.Context, params *BatchDeleteImageInput, optFns ...func(*Options)) (*BatchDeleteImageOutput, error) { if params == nil { params = &BatchDeleteImageInput{} @@ -34,8 +34,8 @@ func (c *Client) BatchDeleteImage(ctx context.Context, params *BatchDeleteImageI type BatchDeleteImageInput struct { - // A list of image ID references that correspond to images to delete. The format of - // the imageIds reference is imageTag=tag or imageDigest=digest. + // A list of image ID references that correspond to images to delete. The format + // of the imageIds reference is imageTag=tag or imageDigest=digest . // // This member is required. ImageIds []types.ImageIdentifier @@ -45,9 +45,9 @@ type BatchDeleteImageInput struct { // This member is required. 
RepositoryName *string - // The AWS account ID associated with the registry that contains the image to - // delete. If you do not specify a registry, the default public registry is - // assumed. + // The Amazon Web Services account ID, or registry alias, that's associated with + // the registry that contains the image to delete. If you do not specify a + // registry, the default public registry is assumed. RegistryId *string noSmithyDocumentSerde @@ -118,6 +118,9 @@ func (c *Client) addOperationBatchDeleteImageMiddlewares(stack *middleware.Stack if err = stack.Initialize.Add(newServiceMetadataMiddleware_opBatchDeleteImage(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_CompleteLayerUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_CompleteLayerUpload.go index 206e55f5..cf1bf511 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_CompleteLayerUpload.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_CompleteLayerUpload.go @@ -10,11 +10,11 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Informs Amazon ECR that the image layer upload has completed for a specified +// Informs Amazon ECR that the image layer upload is complete for a specified // public registry, repository name, and upload ID. You can optionally provide a // sha256 digest of the image layer for data validation purposes. When an image is -// pushed, the CompleteLayerUpload API is called once per each new image layer to -// verify that the upload has completed. This operation is used by the Amazon ECR +// pushed, the CompleteLayerUpload API is called once for each new image layer to +// verify that the upload is complete. This operation is used by the Amazon ECR // proxy and is not generally used by customers for pulling and pushing images. In // most cases, you should use the docker CLI to pull, tag, and push images. func (c *Client) CompleteLayerUpload(ctx context.Context, params *CompleteLayerUploadInput, optFns ...func(*Options)) (*CompleteLayerUploadOutput, error) { @@ -51,8 +51,9 @@ type CompleteLayerUploadInput struct { // This member is required. UploadId *string - // The AWS account ID associated with the registry to which to upload layers. If - // you do not specify a registry, the default public registry is assumed. + // The Amazon Web Services account ID, or registry alias, associated with the + // registry where layers are uploaded. If you do not specify a registry, the + // default public registry is assumed. RegistryId *string noSmithyDocumentSerde @@ -63,13 +64,13 @@ type CompleteLayerUploadOutput struct { // The sha256 digest of the image layer. LayerDigest *string - // The public registry ID associated with the request. + // The public registry ID that's associated with the request. RegistryId *string - // The repository name associated with the request. + // The repository name that's associated with the request. RepositoryName *string - // The upload ID associated with the layer. + // The upload ID that's associated with the layer. UploadId *string // Metadata pertaining to the operation's result. 
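
Aside from the vendored hunks above: the reworded BatchDeleteImage documentation (images addressed by either an imageTag or an imageDigest, with the registry ID optionally being a registry alias as of v1.16.0) maps onto a small calling pattern. The following is only an illustrative sketch and not part of the patch; the repository name and tag are placeholders, and the client setup uses the SDK's usual config loading rather than anything shown in this diff.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ecrpublic"
	"github.com/aws/aws-sdk-go-v2/service/ecrpublic/types"
)

func main() {
	ctx := context.Background()

	// ECR Public is served out of us-east-1, so pin the region explicitly.
	cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion("us-east-1"))
	if err != nil {
		log.Fatal(err)
	}
	client := ecrpublic.NewFromConfig(cfg)

	// Delete one image by tag; removing the last tag deletes the image itself.
	out, err := client.BatchDeleteImage(ctx, &ecrpublic.BatchDeleteImageInput{
		RepositoryName: aws.String("example-repo"), // placeholder repository
		ImageIds: []types.ImageIdentifier{
			{ImageTag: aws.String("v1.0.0")}, // or ImageDigest: aws.String("sha256:...")
		},
		// RegistryId is optional: an account ID or, since v1.16.0, a registry alias.
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("deleted %d image(s), %d failure(s)", len(out.ImageIds), len(out.Failures))
}
```

As the updated doc comment notes, deleting the last remaining tag removes the image from the repository entirely.
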
@@ -129,6 +130,9 @@ func (c *Client) addOperationCompleteLayerUploadMiddlewares(stack *middleware.St if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCompleteLayerUpload(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_CreateRepository.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_CreateRepository.go index 82a2a7e5..9586b3e3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_CreateRepository.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_CreateRepository.go @@ -12,9 +12,8 @@ import ( ) // Creates a repository in a public registry. For more information, see Amazon ECR -// repositories -// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/Repositories.html) in -// the Amazon Elastic Container Registry User Guide. +// repositories (https://docs.aws.amazon.com/AmazonECR/latest/userguide/Repositories.html) +// in the Amazon Elastic Container Registry User Guide. func (c *Client) CreateRepository(ctx context.Context, params *CreateRepositoryInput, optFns ...func(*Options)) (*CreateRepositoryOutput, error) { if params == nil { params = &CreateRepositoryInput{} @@ -33,9 +32,9 @@ func (c *Client) CreateRepository(ctx context.Context, params *CreateRepositoryI type CreateRepositoryInput struct { // The name to use for the repository. This appears publicly in the Amazon ECR - // Public Gallery. The repository name may be specified on its own (such as - // nginx-web-app) or it can be prepended with a namespace to group the repository - // into a category (such as project-a/nginx-web-app). + // Public Gallery. The repository name can be specified on its own (for example + // nginx-web-app ) or prepended with a namespace to group the repository into a + // category (for example project-a/nginx-web-app ). // // This member is required. RepositoryName *string @@ -44,10 +43,10 @@ type CreateRepositoryInput struct { // Public Gallery. CatalogData *types.RepositoryCatalogDataInput - // The metadata that you apply to the repository to help you categorize and - // organize them. Each tag consists of a key and an optional value, both of which - // you define. Tag keys can have a maximum character length of 128 characters, and - // tag values can have a maximum length of 256 characters. + // The metadata that you apply to each repository to help categorize and organize + // your repositories. Each tag consists of a key and an optional value. You define + // both of them. Tag keys can have a maximum character length of 128 characters, + // and tag values can have a maximum length of 256 characters. 
Tags []types.Tag noSmithyDocumentSerde @@ -119,6 +118,9 @@ func (c *Client) addOperationCreateRepositoryMiddlewares(stack *middleware.Stack if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateRepository(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DeleteRepository.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DeleteRepository.go index 30503d3b..7fe23d70 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DeleteRepository.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DeleteRepository.go @@ -12,8 +12,9 @@ import ( ) // Deletes a repository in a public registry. If the repository contains images, -// you must either delete all images in the repository or use the force option -// which deletes all images on your behalf before deleting the repository. +// you must either manually delete all images in the repository or use the force +// option. This option deletes all images on your behalf before deleting the +// repository. func (c *Client) DeleteRepository(ctx context.Context, params *DeleteRepositoryInput, optFns ...func(*Options)) (*DeleteRepositoryOutput, error) { if params == nil { params = &DeleteRepositoryInput{} @@ -36,12 +37,13 @@ type DeleteRepositoryInput struct { // This member is required. RepositoryName *string - // If a repository contains images, forces the deletion. + // The force option can be used to delete a repository that contains images. If + // the force option is not used, the repository must be empty prior to deletion. Force bool - // The AWS account ID associated with the public registry that contains the - // repository to delete. If you do not specify a registry, the default public - // registry is assumed. + // The Amazon Web Services account ID that's associated with the public registry + // that contains the repository to delete. If you do not specify a registry, the + // default public registry is assumed. RegistryId *string noSmithyDocumentSerde @@ -109,6 +111,9 @@ func (c *Client) addOperationDeleteRepositoryMiddlewares(stack *middleware.Stack if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteRepository(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DeleteRepositoryPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DeleteRepositoryPolicy.go index b6ce507b..20a8b70d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DeleteRepositoryPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DeleteRepositoryPolicy.go @@ -10,7 +10,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes the repository policy associated with the specified repository. +// Deletes the repository policy that's associated with the specified repository. 
func (c *Client) DeleteRepositoryPolicy(ctx context.Context, params *DeleteRepositoryPolicyInput, optFns ...func(*Options)) (*DeleteRepositoryPolicyOutput, error) { if params == nil { params = &DeleteRepositoryPolicyInput{} @@ -28,15 +28,15 @@ func (c *Client) DeleteRepositoryPolicy(ctx context.Context, params *DeleteRepos type DeleteRepositoryPolicyInput struct { - // The name of the repository that is associated with the repository policy to + // The name of the repository that's associated with the repository policy to // delete. // // This member is required. RepositoryName *string - // The AWS account ID associated with the public registry that contains the - // repository policy to delete. If you do not specify a registry, the default - // public registry is assumed. + // The Amazon Web Services account ID that's associated with the public registry + // that contains the repository policy to delete. If you do not specify a registry, + // the default public registry is assumed. RegistryId *string noSmithyDocumentSerde @@ -47,10 +47,10 @@ type DeleteRepositoryPolicyOutput struct { // The JSON repository policy that was deleted from the repository. PolicyText *string - // The registry ID associated with the request. + // The registry ID that's associated with the request. RegistryId *string - // The repository name associated with the request. + // The repository name that's associated with the request. RepositoryName *string // Metadata pertaining to the operation's result. @@ -110,6 +110,9 @@ func (c *Client) addOperationDeleteRepositoryPolicyMiddlewares(stack *middleware if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteRepositoryPolicy(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeImageTags.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeImageTags.go index e59efd5a..b13ed17b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeImageTags.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeImageTags.go @@ -35,26 +35,26 @@ type DescribeImageTagsInput struct { // This member is required. RepositoryName *string - // The maximum number of repository results returned by DescribeImageTags in - // paginated output. When this parameter is used, DescribeImageTags only returns - // maxResults results in a single page along with a nextToken response element. The - // remaining results of the initial request can be seen by sending another - // DescribeImageTags request with the returned nextToken value. This value can be - // between 1 and 1000. If this parameter is not used, then DescribeImageTags - // returns up to 100 results and a nextToken value, if applicable. This option - // cannot be used when you specify images with imageIds. + // The maximum number of repository results that's returned by DescribeImageTags + // in paginated output. When this parameter is used, DescribeImageTags only + // returns maxResults results in a single page along with a nextToken response + // element. You can see the remaining results of the initial request by sending + // another DescribeImageTags request with the returned nextToken value. This value + // can be between 1 and 1000. 
If this parameter isn't used, then DescribeImageTags + // returns up to 100 results and a nextToken value, if applicable. If you specify + // images with imageIds , you can't use this option. MaxResults *int32 - // The nextToken value returned from a previous paginated DescribeImageTags request - // where maxResults was used and the results exceeded the value of that parameter. - // Pagination continues from the end of the previous results that returned the - // nextToken value. This value is null when there are no more results to return. - // This option cannot be used when you specify images with imageIds. + // The nextToken value that's returned from a previous paginated DescribeImageTags + // request where maxResults was used and the results exceeded the value of that + // parameter. Pagination continues from the end of the previous results that + // returned the nextToken value. If there are no more results to return, this + // value is null . If you specify images with imageIds , you can't use this option. NextToken *string - // The AWS account ID associated with the public registry that contains the - // repository in which to describe images. If you do not specify a registry, the - // default public registry is assumed. + // The Amazon Web Services account ID that's associated with the public registry + // that contains the repository where images are described. If you do not specify a + // registry, the default public registry is assumed. RegistryId *string noSmithyDocumentSerde @@ -66,9 +66,9 @@ type DescribeImageTagsOutput struct { ImageTagDetails []types.ImageTagDetail // The nextToken value to include in a future DescribeImageTags request. When the - // results of a DescribeImageTags request exceed maxResults, this value can be used - // to retrieve the next page of results. This value is null when there are no more - // results to return. + // results of a DescribeImageTags request exceed maxResults , you can use this + // value to retrieve the next page of results. If there are no more results to + // return, this value is null . NextToken *string // Metadata pertaining to the operation's result. @@ -128,6 +128,9 @@ func (c *Client) addOperationDescribeImageTagsMiddlewares(stack *middleware.Stac if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeImageTags(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } @@ -150,14 +153,14 @@ var _ DescribeImageTagsAPIClient = (*Client)(nil) // DescribeImageTagsPaginatorOptions is the paginator options for DescribeImageTags type DescribeImageTagsPaginatorOptions struct { - // The maximum number of repository results returned by DescribeImageTags in - // paginated output. When this parameter is used, DescribeImageTags only returns - // maxResults results in a single page along with a nextToken response element. The - // remaining results of the initial request can be seen by sending another - // DescribeImageTags request with the returned nextToken value. This value can be - // between 1 and 1000. If this parameter is not used, then DescribeImageTags - // returns up to 100 results and a nextToken value, if applicable. This option - // cannot be used when you specify images with imageIds. + // The maximum number of repository results that's returned by DescribeImageTags + // in paginated output. 
When this parameter is used, DescribeImageTags only + // returns maxResults results in a single page along with a nextToken response + // element. You can see the remaining results of the initial request by sending + // another DescribeImageTags request with the returned nextToken value. This value + // can be between 1 and 1000. If this parameter isn't used, then DescribeImageTags + // returns up to 100 results and a nextToken value, if applicable. If you specify + // images with imageIds , you can't use this option. Limit int32 // Set to true if pagination should stop if the service returns a pagination token diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeImages.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeImages.go index 6d6df97a..c070863e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeImages.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeImages.go @@ -12,11 +12,11 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns metadata about the images in a repository in a public registry. -// Beginning with Docker version 1.9, the Docker client compresses image layers -// before pushing them to a V2 Docker registry. The output of the docker images -// command shows the uncompressed image size, so it may return a larger image size -// than the image sizes returned by DescribeImages. +// Returns metadata that's related to the images in a repository in a public +// registry. Beginning with Docker version 1.9, the Docker client compresses image +// layers before pushing them to a V2 Docker registry. The output of the docker +// images command shows the uncompressed image size. Therefore, it might return a +// larger image size than the image sizes that are returned by DescribeImages . func (c *Client) DescribeImages(ctx context.Context, params *DescribeImagesInput, optFns ...func(*Options)) (*DescribeImagesOutput, error) { if params == nil { params = &DescribeImagesInput{} @@ -42,26 +42,26 @@ type DescribeImagesInput struct { // The list of image IDs for the requested repository. ImageIds []types.ImageIdentifier - // The maximum number of repository results returned by DescribeImages in paginated - // output. When this parameter is used, DescribeImages only returns maxResults - // results in a single page along with a nextToken response element. The remaining - // results of the initial request can be seen by sending another DescribeImages - // request with the returned nextToken value. This value can be between 1 and 1000. - // If this parameter is not used, then DescribeImages returns up to 100 results and - // a nextToken value, if applicable. This option cannot be used when you specify - // images with imageIds. + // The maximum number of repository results that's returned by DescribeImages in + // paginated output. When this parameter is used, DescribeImages only returns + // maxResults results in a single page along with a nextToken response element. + // You can see the remaining results of the initial request by sending another + // DescribeImages request with the returned nextToken value. This value can be + // between 1 and 1000. If this parameter isn't used, then DescribeImages returns + // up to 100 results and a nextToken value, if applicable. If you specify images + // with imageIds , you can't use this option. 
MaxResults *int32 - // The nextToken value returned from a previous paginated DescribeImages request - // where maxResults was used and the results exceeded the value of that parameter. - // Pagination continues from the end of the previous results that returned the - // nextToken value. This value is null when there are no more results to return. - // This option cannot be used when you specify images with imageIds. + // The nextToken value that's returned from a previous paginated DescribeImages + // request where maxResults was used and the results exceeded the value of that + // parameter. Pagination continues from the end of the previous results that + // returned the nextToken value. If there are no more results to return, this + // value is null . If you specify images with imageIds , you can't use this option. NextToken *string - // The AWS account ID associated with the public registry that contains the - // repository in which to describe images. If you do not specify a registry, the - // default public registry is assumed. + // The Amazon Web Services account ID that's associated with the public registry + // that contains the repository where images are described. If you do not specify a + // registry, the default public registry is assumed. RegistryId *string noSmithyDocumentSerde @@ -73,9 +73,9 @@ type DescribeImagesOutput struct { ImageDetails []types.ImageDetail // The nextToken value to include in a future DescribeImages request. When the - // results of a DescribeImages request exceed maxResults, this value can be used to - // retrieve the next page of results. This value is null when there are no more - // results to return. + // results of a DescribeImages request exceed maxResults , you can use this value + // to retrieve the next page of results. If there are no more results to return, + // this value is null . NextToken *string // Metadata pertaining to the operation's result. @@ -135,6 +135,9 @@ func (c *Client) addOperationDescribeImagesMiddlewares(stack *middleware.Stack, if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeImages(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } @@ -157,14 +160,14 @@ var _ DescribeImagesAPIClient = (*Client)(nil) // DescribeImagesPaginatorOptions is the paginator options for DescribeImages type DescribeImagesPaginatorOptions struct { - // The maximum number of repository results returned by DescribeImages in paginated - // output. When this parameter is used, DescribeImages only returns maxResults - // results in a single page along with a nextToken response element. The remaining - // results of the initial request can be seen by sending another DescribeImages - // request with the returned nextToken value. This value can be between 1 and 1000. - // If this parameter is not used, then DescribeImages returns up to 100 results and - // a nextToken value, if applicable. This option cannot be used when you specify - // images with imageIds. + // The maximum number of repository results that's returned by DescribeImages in + // paginated output. When this parameter is used, DescribeImages only returns + // maxResults results in a single page along with a nextToken response element. + // You can see the remaining results of the initial request by sending another + // DescribeImages request with the returned nextToken value. 
This value can be + // between 1 and 1000. If this parameter isn't used, then DescribeImages returns + // up to 100 results and a nextToken value, if applicable. If you specify images + // with imageIds , you can't use this option. Limit int32 // Set to true if pagination should stop if the service returns a pagination token diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeRegistries.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeRegistries.go index e9297dfe..7c3c4144 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeRegistries.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeRegistries.go @@ -30,21 +30,22 @@ func (c *Client) DescribeRegistries(ctx context.Context, params *DescribeRegistr type DescribeRegistriesInput struct { - // The maximum number of repository results returned by DescribeRegistries in - // paginated output. When this parameter is used, DescribeRegistries only returns - // maxResults results in a single page along with a nextToken response element. The - // remaining results of the initial request can be seen by sending another - // DescribeRegistries request with the returned nextToken value. This value can be - // between 1 and 1000. If this parameter is not used, then DescribeRegistries - // returns up to 100 results and a nextToken value, if applicable. + // The maximum number of repository results that's returned by DescribeRegistries + // in paginated output. When this parameter is used, DescribeRegistries only + // returns maxResults results in a single page along with a nextToken response + // element. The remaining results of the initial request can be seen by sending + // another DescribeRegistries request with the returned nextToken value. This + // value can be between 1 and 1000. If this parameter isn't used, then + // DescribeRegistries returns up to 100 results and a nextToken value, if + // applicable. MaxResults *int32 - // The nextToken value returned from a previous paginated DescribeRegistries + // The nextToken value that's returned from a previous paginated DescribeRegistries // request where maxResults was used and the results exceeded the value of that // parameter. Pagination continues from the end of the previous results that - // returned the nextToken value. This value is null when there are no more results - // to return. This token should be treated as an opaque identifier that is only - // used to retrieve the next items in a list and not for other programmatic + // returned the nextToken value. If there are no more results to return, this + // value is null . This token should be treated as an opaque identifier that is + // only used to retrieve the next items in a list and not for other programmatic // purposes. NextToken *string @@ -53,15 +54,15 @@ type DescribeRegistriesInput struct { type DescribeRegistriesOutput struct { - // An object containing the details for a public registry. + // An object that contains the details for a public registry. // // This member is required. Registries []types.Registry - // The nextToken value to include in a future DescribeRepositories request. When - // the results of a DescribeRepositories request exceed maxResults, this value can - // be used to retrieve the next page of results. This value is null when there are - // no more results to return. + // The nextToken value to include in a future DescribeRepositories request. 
If the + // results of a DescribeRepositories request exceed maxResults , you can use this + // value to retrieve the next page of results. If there are no more results, this + // value is null . NextToken *string // Metadata pertaining to the operation's result. @@ -118,6 +119,9 @@ func (c *Client) addOperationDescribeRegistriesMiddlewares(stack *middleware.Sta if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeRegistries(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } @@ -141,13 +145,14 @@ var _ DescribeRegistriesAPIClient = (*Client)(nil) // DescribeRegistriesPaginatorOptions is the paginator options for // DescribeRegistries type DescribeRegistriesPaginatorOptions struct { - // The maximum number of repository results returned by DescribeRegistries in - // paginated output. When this parameter is used, DescribeRegistries only returns - // maxResults results in a single page along with a nextToken response element. The - // remaining results of the initial request can be seen by sending another - // DescribeRegistries request with the returned nextToken value. This value can be - // between 1 and 1000. If this parameter is not used, then DescribeRegistries - // returns up to 100 results and a nextToken value, if applicable. + // The maximum number of repository results that's returned by DescribeRegistries + // in paginated output. When this parameter is used, DescribeRegistries only + // returns maxResults results in a single page along with a nextToken response + // element. The remaining results of the initial request can be seen by sending + // another DescribeRegistries request with the returned nextToken value. This + // value can be between 1 and 1000. If this parameter isn't used, then + // DescribeRegistries returns up to 100 results and a nextToken value, if + // applicable. Limit int32 // Set to true if pagination should stop if the service returns a pagination token diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeRepositories.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeRepositories.go index 6f446f51..1ad26cf7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeRepositories.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeRepositories.go @@ -12,7 +12,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Describes repositories in a public registry. +// Describes repositories that are in a public registry. func (c *Client) DescribeRepositories(ctx context.Context, params *DescribeRepositoriesInput, optFns ...func(*Options)) (*DescribeRepositoriesOutput, error) { if params == nil { params = &DescribeRepositoriesInput{} @@ -30,29 +30,30 @@ func (c *Client) DescribeRepositories(ctx context.Context, params *DescribeRepos type DescribeRepositoriesInput struct { - // The maximum number of repository results returned by DescribeRepositories in - // paginated output. When this parameter is used, DescribeRepositories only returns - // maxResults results in a single page along with a nextToken response element. The - // remaining results of the initial request can be seen by sending another - // DescribeRepositories request with the returned nextToken value. This value can - // be between 1 and 1000. 
If this parameter is not used, then DescribeRepositories - // returns up to 100 results and a nextToken value, if applicable. This option - // cannot be used when you specify repositories with repositoryNames. + // The maximum number of repository results that's returned by DescribeRepositories + // in paginated output. When this parameter is used, DescribeRepositories only + // returns maxResults results in a single page along with a nextToken response + // element. You can see the remaining results of the initial request by sending + // another DescribeRepositories request with the returned nextToken value. This + // value can be between 1 and 1000. If this parameter isn't used, then + // DescribeRepositories returns up to 100 results and a nextToken value, if + // applicable. If you specify repositories with repositoryNames , you can't use + // this option. MaxResults *int32 - // The nextToken value returned from a previous paginated DescribeRepositories - // request where maxResults was used and the results exceeded the value of that - // parameter. Pagination continues from the end of the previous results that - // returned the nextToken value. This value is null when there are no more results - // to return. This option cannot be used when you specify repositories with - // repositoryNames. This token should be treated as an opaque identifier that is - // only used to retrieve the next items in a list and not for other programmatic - // purposes. + // The nextToken value that's returned from a previous paginated + // DescribeRepositories request where maxResults was used and the results exceeded + // the value of that parameter. Pagination continues from the end of the previous + // results that returned the nextToken value. If there are no more results to + // return, this value is null . If you specify repositories with repositoryNames , + // you can't use this option. This token should be treated as an opaque identifier + // that is only used to retrieve the next items in a list and not for other + // programmatic purposes. NextToken *string - // The AWS account ID associated with the registry that contains the repositories - // to be described. If you do not specify a registry, the default public registry - // is assumed. + // The Amazon Web Services account ID that's associated with the registry that + // contains the repositories to be described. If you do not specify a registry, the + // default public registry is assumed. RegistryId *string // A list of repositories to describe. If this parameter is omitted, then all @@ -65,9 +66,9 @@ type DescribeRepositoriesInput struct { type DescribeRepositoriesOutput struct { // The nextToken value to include in a future DescribeRepositories request. When - // the results of a DescribeRepositories request exceed maxResults, this value can - // be used to retrieve the next page of results. This value is null when there are - // no more results to return. + // the results of a DescribeRepositories request exceed maxResults , this value can + // be used to retrieve the next page of results. If there are no more results to + // return, this value is null . NextToken *string // A list of repository objects corresponding to valid repositories. 
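
The rewritten maxResults / nextToken documentation in these DescribeRepositories hunks describes the standard paginated response shape, and the DescribeRepositoriesPaginatorOptions struct further down is the knob for the generated paginator. Purely as a hedged sketch (assuming the generated NewDescribeRepositoriesPaginator constructor that accompanies those options, and reusing a client built as in the earlier sketch), iterating every page could look like this:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecrpublic"
)

// listAllRepositories walks every page of DescribeRepositories, letting the
// generated paginator handle the nextToken bookkeeping described above.
func listAllRepositories(ctx context.Context, client *ecrpublic.Client) error {
	paginator := ecrpublic.NewDescribeRepositoriesPaginator(client, &ecrpublic.DescribeRepositoriesInput{
		MaxResults: aws.Int32(100), // the docs above allow values from 1 to 1000
	})
	for paginator.HasMorePages() {
		page, err := paginator.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, repo := range page.Repositories {
			log.Println(aws.ToString(repo.RepositoryName))
		}
	}
	return nil
}
```
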
@@ -127,6 +128,9 @@ func (c *Client) addOperationDescribeRepositoriesMiddlewares(stack *middleware.S if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeRepositories(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } @@ -150,14 +154,15 @@ var _ DescribeRepositoriesAPIClient = (*Client)(nil) // DescribeRepositoriesPaginatorOptions is the paginator options for // DescribeRepositories type DescribeRepositoriesPaginatorOptions struct { - // The maximum number of repository results returned by DescribeRepositories in - // paginated output. When this parameter is used, DescribeRepositories only returns - // maxResults results in a single page along with a nextToken response element. The - // remaining results of the initial request can be seen by sending another - // DescribeRepositories request with the returned nextToken value. This value can - // be between 1 and 1000. If this parameter is not used, then DescribeRepositories - // returns up to 100 results and a nextToken value, if applicable. This option - // cannot be used when you specify repositories with repositoryNames. + // The maximum number of repository results that's returned by DescribeRepositories + // in paginated output. When this parameter is used, DescribeRepositories only + // returns maxResults results in a single page along with a nextToken response + // element. You can see the remaining results of the initial request by sending + // another DescribeRepositories request with the returned nextToken value. This + // value can be between 1 and 1000. If this parameter isn't used, then + // DescribeRepositories returns up to 100 results and a nextToken value, if + // applicable. If you specify repositories with repositoryNames , you can't use + // this option. Limit int32 // Set to true if pagination should stop if the service returns a pagination token diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetAuthorizationToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetAuthorizationToken.go index adbe13a0..0b7eb721 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetAuthorizationToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetAuthorizationToken.go @@ -12,7 +12,7 @@ import ( ) // Retrieves an authorization token. An authorization token represents your IAM -// authentication credentials and can be used to access any Amazon ECR registry +// authentication credentials. You can use it to access any Amazon ECR registry // that your IAM principal has access to. The authorization token is valid for 12 // hours. This API requires the ecr-public:GetAuthorizationToken and // sts:GetServiceBearerToken permissions. @@ -94,6 +94,9 @@ func (c *Client) addOperationGetAuthorizationTokenMiddlewares(stack *middleware. 
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetAuthorizationToken(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRegistryCatalogData.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRegistryCatalogData.go index 88cd5b4b..4f1b2d42 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRegistryCatalogData.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRegistryCatalogData.go @@ -92,6 +92,9 @@ func (c *Client) addOperationGetRegistryCatalogDataMiddlewares(stack *middleware if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetRegistryCatalogData(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRepositoryCatalogData.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRepositoryCatalogData.go index 5c7173fe..fd5877a4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRepositoryCatalogData.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRepositoryCatalogData.go @@ -35,9 +35,9 @@ type GetRepositoryCatalogDataInput struct { // This member is required. RepositoryName *string - // The AWS account ID associated with the registry that contains the repositories - // to be described. If you do not specify a registry, the default public registry - // is assumed. + // The Amazon Web Services account ID that's associated with the registry that + // contains the repositories to be described. If you do not specify a registry, the + // default public registry is assumed. RegistryId *string noSmithyDocumentSerde @@ -105,6 +105,9 @@ func (c *Client) addOperationGetRepositoryCatalogDataMiddlewares(stack *middlewa if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetRepositoryCatalogData(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRepositoryPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRepositoryPolicy.go index 80584f92..90fc8293 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRepositoryPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRepositoryPolicy.go @@ -33,9 +33,9 @@ type GetRepositoryPolicyInput struct { // This member is required. RepositoryName *string - // The AWS account ID associated with the public registry that contains the - // repository. If you do not specify a registry, the default public registry is - // assumed. + // The Amazon Web Services account ID that's associated with the public registry + // that contains the repository. If you do not specify a registry, the default + // public registry is assumed. 
RegistryId *string noSmithyDocumentSerde @@ -43,14 +43,14 @@ type GetRepositoryPolicyInput struct { type GetRepositoryPolicyOutput struct { - // The repository policy text associated with the repository. The policy text will - // be in JSON format. + // The repository policy text that's associated with the repository. The policy + // text will be in JSON format. PolicyText *string - // The registry ID associated with the request. + // The registry ID that's associated with the request. RegistryId *string - // The repository name associated with the request. + // The repository name that's associated with the request. RepositoryName *string // Metadata pertaining to the operation's result. @@ -110,6 +110,9 @@ func (c *Client) addOperationGetRepositoryPolicyMiddlewares(stack *middleware.St if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetRepositoryPolicy(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_InitiateLayerUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_InitiateLayerUpload.go index 1b4576d7..42ca47b3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_InitiateLayerUpload.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_InitiateLayerUpload.go @@ -11,12 +11,11 @@ import ( ) // Notifies Amazon ECR that you intend to upload an image layer. When an image is -// pushed, the InitiateLayerUpload API is called once per image layer that has not -// already been uploaded. Whether or not an image layer has been uploaded is -// determined by the BatchCheckLayerAvailability API action. This operation is used -// by the Amazon ECR proxy and is not generally used by customers for pulling and -// pushing images. In most cases, you should use the docker CLI to pull, tag, and -// push images. +// pushed, the InitiateLayerUpload API is called once for each image layer that +// hasn't already been uploaded. Whether an image layer uploads is determined by +// the BatchCheckLayerAvailability API action. This operation is used by the Amazon +// ECR proxy and is not generally used by customers for pulling and pushing images. +// In most cases, you should use the docker CLI to pull, tag, and push images. func (c *Client) InitiateLayerUpload(ctx context.Context, params *InitiateLayerUploadInput, optFns ...func(*Options)) (*InitiateLayerUploadOutput, error) { if params == nil { params = &InitiateLayerUploadInput{} @@ -34,14 +33,14 @@ func (c *Client) InitiateLayerUpload(ctx context.Context, params *InitiateLayerU type InitiateLayerUploadInput struct { - // The name of the repository to which you intend to upload layers. + // The name of the repository that you want to upload layers to. // // This member is required. RepositoryName *string - // The AWS account ID associated with the registry to which you intend to upload - // layers. If you do not specify a registry, the default public registry is - // assumed. + // The Amazon Web Services account ID, or registry alias, that's associated with + // the registry to which you intend to upload layers. If you do not specify a + // registry, the default public registry is assumed. 
RegistryId *string noSmithyDocumentSerde @@ -113,6 +112,9 @@ func (c *Client) addOperationInitiateLayerUploadMiddlewares(stack *middleware.St if err = stack.Initialize.Add(newServiceMetadataMiddleware_opInitiateLayerUpload(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_ListTagsForResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_ListTagsForResource.go index 6db00627..0ffad3cc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_ListTagsForResource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_ListTagsForResource.go @@ -29,8 +29,8 @@ func (c *Client) ListTagsForResource(ctx context.Context, params *ListTagsForRes type ListTagsForResourceInput struct { - // The Amazon Resource Name (ARN) that identifies the resource for which to list - // the tags. Currently, the supported resource is an Amazon ECR Public repository. + // The Amazon Resource Name (ARN) that identifies the resource to list the tags + // for. Currently, the supported resource is an Amazon ECR Public repository. // // This member is required. ResourceArn *string @@ -100,6 +100,9 @@ func (c *Client) addOperationListTagsForResourceMiddlewares(stack *middleware.St if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListTagsForResource(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutImage.go index 3a2cbdbd..91d681d5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutImage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutImage.go @@ -11,12 +11,12 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Creates or updates the image manifest and tags associated with an image. When an -// image is pushed and all new image layers have been uploaded, the PutImage API is -// called once to create or update the image manifest and the tags associated with -// the image. This operation is used by the Amazon ECR proxy and is not generally -// used by customers for pulling and pushing images. In most cases, you should use -// the docker CLI to pull, tag, and push images. +// Creates or updates the image manifest and tags that are associated with an +// image. When an image is pushed and all new image layers have been uploaded, the +// PutImage API is called once to create or update the image manifest and the tags +// that are associated with the image. This operation is used by the Amazon ECR +// proxy and is not generally used by customers for pulling and pushing images. In +// most cases, you should use the docker CLI to pull, tag, and push images. func (c *Client) PutImage(ctx context.Context, params *PutImageInput, optFns ...func(*Options)) (*PutImageOutput, error) { if params == nil { params = &PutImageInput{} @@ -34,22 +34,22 @@ func (c *Client) PutImage(ctx context.Context, params *PutImageInput, optFns ... type PutImageInput struct { - // The image manifest corresponding to the image to be uploaded. 
+ // The image manifest that corresponds to the image to be uploaded. // // This member is required. ImageManifest *string - // The name of the repository in which to put the image. + // The name of the repository where the image is put. // // This member is required. RepositoryName *string - // The image digest of the image manifest corresponding to the image. + // The image digest of the image manifest that corresponds to the image. ImageDigest *string - // The media type of the image manifest. If you push an image manifest that does - // not contain the mediaType field, you must specify the imageManifestMediaType in - // the request. + // The media type of the image manifest. If you push an image manifest that + // doesn't contain the mediaType field, you must specify the imageManifestMediaType + // in the request. ImageManifestMediaType *string // The tag to associate with the image. This parameter is required for images that @@ -57,9 +57,9 @@ type PutImageInput struct { // formats. ImageTag *string - // The AWS account ID associated with the public registry that contains the - // repository in which to put the image. If you do not specify a registry, the - // default public registry is assumed. + // The Amazon Web Services account ID, or registry alias, that's associated with + // the public registry that contains the repository where the image is put. If you + // do not specify a registry, the default public registry is assumed. RegistryId *string noSmithyDocumentSerde @@ -127,6 +127,9 @@ func (c *Client) addOperationPutImageMiddlewares(stack *middleware.Stack, option if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutImage(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutRegistryCatalogData.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutRegistryCatalogData.go index 994364a5..c6677f81 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutRegistryCatalogData.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutRegistryCatalogData.go @@ -11,7 +11,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Create or updates the catalog data for a public registry. +// Create or update the catalog data for a public registry. 
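As context for the hunks above: every regenerated ecrpublic operation now registers the SDK's recursion-detection middleware in its addOperation*Middlewares function, right after the service-metadata middleware. A minimal sketch of that registration pattern, assuming only the standard aws-sdk-go-v2 and smithy-go middleware packages (the surrounding function name is illustrative, not from the patch):

package example

import (
	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
	"github.com/aws/smithy-go/middleware"
)

// addExampleOperationMiddlewares mirrors the generated pattern seen in the
// hunks above: the recursion-detection middleware is added to the operation
// stack, and any registration error aborts the stack build.
func addExampleOperationMiddlewares(stack *middleware.Stack) error {
	// AddRecursionDetection propagates the X-Amzn-Trace-Id header on
	// outgoing requests so the service can detect recursive Lambda
	// invocation chains.
	if err := awsmiddleware.AddRecursionDetection(stack); err != nil {
		return err
	}
	return nil
}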
func (c *Client) PutRegistryCatalogData(ctx context.Context, params *PutRegistryCatalogDataInput, optFns ...func(*Options)) (*PutRegistryCatalogDataOutput, error) { if params == nil { params = &PutRegistryCatalogDataInput{} @@ -98,6 +98,9 @@ func (c *Client) addOperationPutRegistryCatalogDataMiddlewares(stack *middleware if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutRegistryCatalogData(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutRepositoryCatalogData.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutRepositoryCatalogData.go index 4510287f..70e5d3c3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutRepositoryCatalogData.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutRepositoryCatalogData.go @@ -40,8 +40,9 @@ type PutRepositoryCatalogDataInput struct { // This member is required. RepositoryName *string - // The AWS account ID associated with the public registry the repository is in. If - // you do not specify a registry, the default public registry is assumed. + // The Amazon Web Services account ID that's associated with the public registry + // the repository is in. If you do not specify a registry, the default public + // registry is assumed. RegistryId *string noSmithyDocumentSerde @@ -109,6 +110,9 @@ func (c *Client) addOperationPutRepositoryCatalogDataMiddlewares(stack *middlewa if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutRepositoryCatalogData(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_SetRepositoryPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_SetRepositoryPolicy.go index 46fdd68a..ff5cbd65 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_SetRepositoryPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_SetRepositoryPolicy.go @@ -10,9 +10,8 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Applies a repository policy to the specified public repository to control access -// permissions. For more information, see Amazon ECR Repository Policies -// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policies.html) +// Applies a repository policy to the specified public repository to control +// access permissions. For more information, see Amazon ECR Repository Policies (https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policies.html) // in the Amazon Elastic Container Registry User Guide. func (c *Client) SetRepositoryPolicy(ctx context.Context, params *SetRepositoryPolicyInput, optFns ...func(*Options)) (*SetRepositoryPolicyOutput, error) { if params == nil { @@ -32,8 +31,7 @@ func (c *Client) SetRepositoryPolicy(ctx context.Context, params *SetRepositoryP type SetRepositoryPolicyInput struct { // The JSON repository policy text to apply to the repository. 
For more - // information, see Amazon ECR Repository Policies - // (https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policy-examples.html) + // information, see Amazon ECR Repository Policies (https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policy-examples.html) // in the Amazon Elastic Container Registry User Guide. // // This member is required. @@ -44,14 +42,14 @@ type SetRepositoryPolicyInput struct { // This member is required. RepositoryName *string - // If the policy you are attempting to set on a repository policy would prevent you + // If the policy that you want to set on a repository policy would prevent you // from setting another policy in the future, you must force the - // SetRepositoryPolicy operation. This is intended to prevent accidental repository - // lock outs. + // SetRepositoryPolicy operation. This prevents accidental repository lockouts. Force bool - // The AWS account ID associated with the registry that contains the repository. If - // you do not specify a registry, the default public registry is assumed. + // The Amazon Web Services account ID that's associated with the registry that + // contains the repository. If you do not specify a registry, the default public + // registry is assumed. RegistryId *string noSmithyDocumentSerde @@ -59,13 +57,13 @@ type SetRepositoryPolicyInput struct { type SetRepositoryPolicyOutput struct { - // The JSON repository policy text applied to the repository. + // The JSON repository policy text that's applied to the repository. PolicyText *string - // The registry ID associated with the request. + // The registry ID that's associated with the request. RegistryId *string - // The repository name associated with the request. + // The repository name that's associated with the request. RepositoryName *string // Metadata pertaining to the operation's result. @@ -125,6 +123,9 @@ func (c *Client) addOperationSetRepositoryPolicyMiddlewares(stack *middleware.St if err = stack.Initialize.Add(newServiceMetadataMiddleware_opSetRepositoryPolicy(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_TagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_TagResource.go index 0e704400..b4cf870d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_TagResource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_TagResource.go @@ -11,10 +11,10 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Associates the specified tags to a resource with the specified resourceArn. If -// existing tags on a resource are not specified in the request parameters, they -// are not changed. When a resource is deleted, the tags associated with that -// resource are deleted as well. +// Associates the specified tags to a resource with the specified resourceArn . If +// existing tags on a resource aren't specified in the request parameters, they +// aren't changed. When a resource is deleted, the tags associated with that +// resource are also deleted. 
func (c *Client) TagResource(ctx context.Context, params *TagResourceInput, optFns ...func(*Options)) (*TagResourceOutput, error) { if params == nil { params = &TagResourceInput{} @@ -32,8 +32,8 @@ func (c *Client) TagResource(ctx context.Context, params *TagResourceInput, optF type TagResourceInput struct { - // The Amazon Resource Name (ARN) of the resource to which to add tags. Currently, - // the supported resource is an Amazon ECR Public repository. + // The Amazon Resource Name (ARN) of the resource to add tags to. Currently, the + // supported resource is an Amazon ECR Public repository. // // This member is required. ResourceArn *string @@ -106,6 +106,9 @@ func (c *Client) addOperationTagResourceMiddlewares(stack *middleware.Stack, opt if err = stack.Initialize.Add(newServiceMetadataMiddleware_opTagResource(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_UntagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_UntagResource.go index 8f042d4c..66502ca4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_UntagResource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_UntagResource.go @@ -28,8 +28,8 @@ func (c *Client) UntagResource(ctx context.Context, params *UntagResourceInput, type UntagResourceInput struct { - // The Amazon Resource Name (ARN) of the resource from which to delete tags. - // Currently, the supported resource is an Amazon ECR Public repository. + // The Amazon Resource Name (ARN) of the resource to delete tags from. Currently, + // the supported resource is an Amazon ECR Public repository. // // This member is required. ResourceArn *string @@ -100,6 +100,9 @@ func (c *Client) addOperationUntagResourceMiddlewares(stack *middleware.Stack, o if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUntagResource(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_UploadLayerPart.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_UploadLayerPart.go index ce3fa0a4..f194f0c1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_UploadLayerPart.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_UploadLayerPart.go @@ -12,10 +12,10 @@ import ( // Uploads an image layer part to Amazon ECR. When an image is pushed, each new // image layer is uploaded in parts. The maximum size of each image layer part can -// be 20971520 bytes (or about 20MB). The UploadLayerPart API is called once per -// each new image layer part. This operation is used by the Amazon ECR proxy and is -// not generally used by customers for pulling and pushing images. In most cases, -// you should use the docker CLI to pull, tag, and push images. +// be 20971520 bytes (about 20MB). The UploadLayerPart API is called once for each +// new image layer part. This operation is used by the Amazon ECR proxy and is not +// generally used by customers for pulling and pushing images. In most cases, you +// should use the docker CLI to pull, tag, and push images. 
func (c *Client) UploadLayerPart(ctx context.Context, params *UploadLayerPartInput, optFns ...func(*Options)) (*UploadLayerPartOutput, error) { if params == nil { params = &UploadLayerPartInput{} @@ -48,7 +48,7 @@ type UploadLayerPartInput struct { // This member is required. PartLastByte *int64 - // The name of the repository to which you are uploading layer parts. + // The name of the repository that you're uploading layer parts to. // // This member is required. RepositoryName *string @@ -59,8 +59,9 @@ type UploadLayerPartInput struct { // This member is required. UploadId *string - // The AWS account ID associated with the registry to which you are uploading layer - // parts. If you do not specify a registry, the default public registry is assumed. + // The Amazon Web Services account ID, or registry alias, that's associated with + // the registry that you're uploading layer parts to. If you do not specify a + // registry, the default public registry is assumed. RegistryId *string noSmithyDocumentSerde @@ -68,16 +69,16 @@ type UploadLayerPartInput struct { type UploadLayerPartOutput struct { - // The integer value of the last byte received in the request. + // The integer value of the last byte that's received in the request. LastByteReceived *int64 - // The registry ID associated with the request. + // The registry ID that's associated with the request. RegistryId *string - // The repository name associated with the request. + // The repository name that's associated with the request. RepositoryName *string - // The upload ID associated with the request. + // The upload ID that's associated with the request. UploadId *string // Metadata pertaining to the operation's result. @@ -137,6 +138,9 @@ func (c *Client) addOperationUploadLayerPartMiddlewares(stack *middleware.Stack, if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUploadLayerPart(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/deserializers.go index 2a9ac620..0ef759a1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/deserializers.go @@ -86,9 +86,9 @@ func awsAwsjson11_deserializeOpErrorBatchCheckLayerAvailability(response *smithy errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -97,7 +97,7 @@ func awsAwsjson11_deserializeOpErrorBatchCheckLayerAvailability(response *smithy body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -109,8 +109,8 @@ func awsAwsjson11_deserializeOpErrorBatchCheckLayerAvailability(response *smithy } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = 
restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -129,6 +129,9 @@ func awsAwsjson11_deserializeOpErrorBatchCheckLayerAvailability(response *smithy case strings.EqualFold("ServerException", errorCode): return awsAwsjson11_deserializeErrorServerException(response, errorBody) + case strings.EqualFold("UnsupportedCommandException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -206,9 +209,9 @@ func awsAwsjson11_deserializeOpErrorBatchDeleteImage(response *smithyhttp.Respon errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -217,7 +220,7 @@ func awsAwsjson11_deserializeOpErrorBatchDeleteImage(response *smithyhttp.Respon body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -229,8 +232,8 @@ func awsAwsjson11_deserializeOpErrorBatchDeleteImage(response *smithyhttp.Respon } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -246,6 +249,9 @@ func awsAwsjson11_deserializeOpErrorBatchDeleteImage(response *smithyhttp.Respon case strings.EqualFold("ServerException", errorCode): return awsAwsjson11_deserializeErrorServerException(response, errorBody) + case strings.EqualFold("UnsupportedCommandException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -323,9 +329,9 @@ func awsAwsjson11_deserializeOpErrorCompleteLayerUpload(response *smithyhttp.Res errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -334,7 +340,7 @@ func awsAwsjson11_deserializeOpErrorCompleteLayerUpload(response *smithyhttp.Res body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -346,8 +352,8 @@ func awsAwsjson11_deserializeOpErrorCompleteLayerUpload(response *smithyhttp.Res } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -461,9 +467,9 @@ func awsAwsjson11_deserializeOpErrorCreateRepository(response *smithyhttp.Respon errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 
0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -472,7 +478,7 @@ func awsAwsjson11_deserializeOpErrorCreateRepository(response *smithyhttp.Respon body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -484,8 +490,8 @@ func awsAwsjson11_deserializeOpErrorCreateRepository(response *smithyhttp.Respon } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -510,6 +516,9 @@ func awsAwsjson11_deserializeOpErrorCreateRepository(response *smithyhttp.Respon case strings.EqualFold("TooManyTagsException", errorCode): return awsAwsjson11_deserializeErrorTooManyTagsException(response, errorBody) + case strings.EqualFold("UnsupportedCommandException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -587,9 +596,9 @@ func awsAwsjson11_deserializeOpErrorDeleteRepository(response *smithyhttp.Respon errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -598,7 +607,7 @@ func awsAwsjson11_deserializeOpErrorDeleteRepository(response *smithyhttp.Respon body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -610,8 +619,8 @@ func awsAwsjson11_deserializeOpErrorDeleteRepository(response *smithyhttp.Respon } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -630,6 +639,9 @@ func awsAwsjson11_deserializeOpErrorDeleteRepository(response *smithyhttp.Respon case strings.EqualFold("ServerException", errorCode): return awsAwsjson11_deserializeErrorServerException(response, errorBody) + case strings.EqualFold("UnsupportedCommandException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -707,9 +719,9 @@ func awsAwsjson11_deserializeOpErrorDeleteRepositoryPolicy(response *smithyhttp. 
errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -718,7 +730,7 @@ func awsAwsjson11_deserializeOpErrorDeleteRepositoryPolicy(response *smithyhttp. body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -730,8 +742,8 @@ func awsAwsjson11_deserializeOpErrorDeleteRepositoryPolicy(response *smithyhttp. } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -750,6 +762,9 @@ func awsAwsjson11_deserializeOpErrorDeleteRepositoryPolicy(response *smithyhttp. case strings.EqualFold("ServerException", errorCode): return awsAwsjson11_deserializeErrorServerException(response, errorBody) + case strings.EqualFold("UnsupportedCommandException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -827,9 +842,9 @@ func awsAwsjson11_deserializeOpErrorDescribeImages(response *smithyhttp.Response errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -838,7 +853,7 @@ func awsAwsjson11_deserializeOpErrorDescribeImages(response *smithyhttp.Response body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -850,8 +865,8 @@ func awsAwsjson11_deserializeOpErrorDescribeImages(response *smithyhttp.Response } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -870,6 +885,9 @@ func awsAwsjson11_deserializeOpErrorDescribeImages(response *smithyhttp.Response case strings.EqualFold("ServerException", errorCode): return awsAwsjson11_deserializeErrorServerException(response, errorBody) + case strings.EqualFold("UnsupportedCommandException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -947,9 +965,9 @@ func awsAwsjson11_deserializeOpErrorDescribeImageTags(response *smithyhttp.Respo errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = 
restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -958,7 +976,7 @@ func awsAwsjson11_deserializeOpErrorDescribeImageTags(response *smithyhttp.Respo body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -970,8 +988,8 @@ func awsAwsjson11_deserializeOpErrorDescribeImageTags(response *smithyhttp.Respo } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -987,6 +1005,9 @@ func awsAwsjson11_deserializeOpErrorDescribeImageTags(response *smithyhttp.Respo case strings.EqualFold("ServerException", errorCode): return awsAwsjson11_deserializeErrorServerException(response, errorBody) + case strings.EqualFold("UnsupportedCommandException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -1064,9 +1085,9 @@ func awsAwsjson11_deserializeOpErrorDescribeRegistries(response *smithyhttp.Resp errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -1075,7 +1096,7 @@ func awsAwsjson11_deserializeOpErrorDescribeRegistries(response *smithyhttp.Resp body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -1087,8 +1108,8 @@ func awsAwsjson11_deserializeOpErrorDescribeRegistries(response *smithyhttp.Resp } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -1181,9 +1202,9 @@ func awsAwsjson11_deserializeOpErrorDescribeRepositories(response *smithyhttp.Re errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -1192,7 +1213,7 @@ func awsAwsjson11_deserializeOpErrorDescribeRepositories(response *smithyhttp.Re body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -1204,8 +1225,8 @@ func awsAwsjson11_deserializeOpErrorDescribeRepositories(response *smithyhttp.Re } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = 
restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -1221,6 +1242,9 @@ func awsAwsjson11_deserializeOpErrorDescribeRepositories(response *smithyhttp.Re case strings.EqualFold("ServerException", errorCode): return awsAwsjson11_deserializeErrorServerException(response, errorBody) + case strings.EqualFold("UnsupportedCommandException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -1298,9 +1322,9 @@ func awsAwsjson11_deserializeOpErrorGetAuthorizationToken(response *smithyhttp.R errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -1309,7 +1333,7 @@ func awsAwsjson11_deserializeOpErrorGetAuthorizationToken(response *smithyhttp.R body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -1321,8 +1345,8 @@ func awsAwsjson11_deserializeOpErrorGetAuthorizationToken(response *smithyhttp.R } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -1335,6 +1359,9 @@ func awsAwsjson11_deserializeOpErrorGetAuthorizationToken(response *smithyhttp.R case strings.EqualFold("ServerException", errorCode): return awsAwsjson11_deserializeErrorServerException(response, errorBody) + case strings.EqualFold("UnsupportedCommandException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -1412,9 +1439,9 @@ func awsAwsjson11_deserializeOpErrorGetRegistryCatalogData(response *smithyhttp. errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -1423,7 +1450,7 @@ func awsAwsjson11_deserializeOpErrorGetRegistryCatalogData(response *smithyhttp. body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -1435,8 +1462,8 @@ func awsAwsjson11_deserializeOpErrorGetRegistryCatalogData(response *smithyhttp. 
} errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -1526,9 +1553,9 @@ func awsAwsjson11_deserializeOpErrorGetRepositoryCatalogData(response *smithyhtt errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -1537,7 +1564,7 @@ func awsAwsjson11_deserializeOpErrorGetRepositoryCatalogData(response *smithyhtt body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -1549,8 +1576,8 @@ func awsAwsjson11_deserializeOpErrorGetRepositoryCatalogData(response *smithyhtt } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -1560,12 +1587,18 @@ func awsAwsjson11_deserializeOpErrorGetRepositoryCatalogData(response *smithyhtt case strings.EqualFold("InvalidParameterException", errorCode): return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + case strings.EqualFold("RepositoryCatalogDataNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryCatalogDataNotFoundException(response, errorBody) + case strings.EqualFold("RepositoryNotFoundException", errorCode): return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) case strings.EqualFold("ServerException", errorCode): return awsAwsjson11_deserializeErrorServerException(response, errorBody) + case strings.EqualFold("UnsupportedCommandException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -1643,9 +1676,9 @@ func awsAwsjson11_deserializeOpErrorGetRepositoryPolicy(response *smithyhttp.Res errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -1654,7 +1687,7 @@ func awsAwsjson11_deserializeOpErrorGetRepositoryPolicy(response *smithyhttp.Res body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -1666,8 +1699,8 @@ func awsAwsjson11_deserializeOpErrorGetRepositoryPolicy(response *smithyhttp.Res } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -1686,6 
+1719,9 @@ func awsAwsjson11_deserializeOpErrorGetRepositoryPolicy(response *smithyhttp.Res case strings.EqualFold("ServerException", errorCode): return awsAwsjson11_deserializeErrorServerException(response, errorBody) + case strings.EqualFold("UnsupportedCommandException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -1763,9 +1799,9 @@ func awsAwsjson11_deserializeOpErrorInitiateLayerUpload(response *smithyhttp.Res errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -1774,7 +1810,7 @@ func awsAwsjson11_deserializeOpErrorInitiateLayerUpload(response *smithyhttp.Res body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -1786,8 +1822,8 @@ func awsAwsjson11_deserializeOpErrorInitiateLayerUpload(response *smithyhttp.Res } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -1886,9 +1922,9 @@ func awsAwsjson11_deserializeOpErrorListTagsForResource(response *smithyhttp.Res errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -1897,7 +1933,7 @@ func awsAwsjson11_deserializeOpErrorListTagsForResource(response *smithyhttp.Res body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -1909,8 +1945,8 @@ func awsAwsjson11_deserializeOpErrorListTagsForResource(response *smithyhttp.Res } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -1926,6 +1962,9 @@ func awsAwsjson11_deserializeOpErrorListTagsForResource(response *smithyhttp.Res case strings.EqualFold("ServerException", errorCode): return awsAwsjson11_deserializeErrorServerException(response, errorBody) + case strings.EqualFold("UnsupportedCommandException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -2003,9 +2042,9 @@ func awsAwsjson11_deserializeOpErrorPutImage(response *smithyhttp.Response, meta errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := 
response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -2014,7 +2053,7 @@ func awsAwsjson11_deserializeOpErrorPutImage(response *smithyhttp.Response, meta body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -2026,8 +2065,8 @@ func awsAwsjson11_deserializeOpErrorPutImage(response *smithyhttp.Response, meta } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -2144,9 +2183,9 @@ func awsAwsjson11_deserializeOpErrorPutRegistryCatalogData(response *smithyhttp. errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -2155,7 +2194,7 @@ func awsAwsjson11_deserializeOpErrorPutRegistryCatalogData(response *smithyhttp. body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -2167,8 +2206,8 @@ func awsAwsjson11_deserializeOpErrorPutRegistryCatalogData(response *smithyhttp. 
} errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -2261,9 +2300,9 @@ func awsAwsjson11_deserializeOpErrorPutRepositoryCatalogData(response *smithyhtt errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -2272,7 +2311,7 @@ func awsAwsjson11_deserializeOpErrorPutRepositoryCatalogData(response *smithyhtt body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -2284,8 +2323,8 @@ func awsAwsjson11_deserializeOpErrorPutRepositoryCatalogData(response *smithyhtt } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -2301,6 +2340,9 @@ func awsAwsjson11_deserializeOpErrorPutRepositoryCatalogData(response *smithyhtt case strings.EqualFold("ServerException", errorCode): return awsAwsjson11_deserializeErrorServerException(response, errorBody) + case strings.EqualFold("UnsupportedCommandException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -2378,9 +2420,9 @@ func awsAwsjson11_deserializeOpErrorSetRepositoryPolicy(response *smithyhttp.Res errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -2389,7 +2431,7 @@ func awsAwsjson11_deserializeOpErrorSetRepositoryPolicy(response *smithyhttp.Res body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -2401,8 +2443,8 @@ func awsAwsjson11_deserializeOpErrorSetRepositoryPolicy(response *smithyhttp.Res } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -2418,6 +2460,9 @@ func awsAwsjson11_deserializeOpErrorSetRepositoryPolicy(response *smithyhttp.Res case strings.EqualFold("ServerException", errorCode): return awsAwsjson11_deserializeErrorServerException(response, errorBody) + case strings.EqualFold("UnsupportedCommandException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -2495,9 +2540,9 @@ func 
awsAwsjson11_deserializeOpErrorTagResource(response *smithyhttp.Response, m errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -2506,7 +2551,7 @@ func awsAwsjson11_deserializeOpErrorTagResource(response *smithyhttp.Response, m body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -2518,8 +2563,8 @@ func awsAwsjson11_deserializeOpErrorTagResource(response *smithyhttp.Response, m } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -2541,6 +2586,9 @@ func awsAwsjson11_deserializeOpErrorTagResource(response *smithyhttp.Response, m case strings.EqualFold("TooManyTagsException", errorCode): return awsAwsjson11_deserializeErrorTooManyTagsException(response, errorBody) + case strings.EqualFold("UnsupportedCommandException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -2618,9 +2666,9 @@ func awsAwsjson11_deserializeOpErrorUntagResource(response *smithyhttp.Response, errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -2629,7 +2677,7 @@ func awsAwsjson11_deserializeOpErrorUntagResource(response *smithyhttp.Response, body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -2641,8 +2689,8 @@ func awsAwsjson11_deserializeOpErrorUntagResource(response *smithyhttp.Response, } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -2664,6 +2712,9 @@ func awsAwsjson11_deserializeOpErrorUntagResource(response *smithyhttp.Response, case strings.EqualFold("TooManyTagsException", errorCode): return awsAwsjson11_deserializeErrorTooManyTagsException(response, errorBody) + case strings.EqualFold("UnsupportedCommandException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -2741,9 +2792,9 @@ func awsAwsjson11_deserializeOpErrorUploadLayerPart(response *smithyhttp.Respons errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := 
response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -2752,7 +2803,7 @@ func awsAwsjson11_deserializeOpErrorUploadLayerPart(response *smithyhttp.Respons body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -2764,8 +2815,8 @@ func awsAwsjson11_deserializeOpErrorUploadLayerPart(response *smithyhttp.Respons } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -3366,6 +3417,41 @@ func awsAwsjson11_deserializeErrorRepositoryAlreadyExistsException(response *smi return output } +func awsAwsjson11_deserializeErrorRepositoryCatalogDataNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.RepositoryCatalogDataNotFoundException{} + err := awsAwsjson11_deserializeDocumentRepositoryCatalogDataNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + func awsAwsjson11_deserializeErrorRepositoryNotEmptyException(response *smithyhttp.Response, errorBody *bytes.Reader) error { var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) @@ -5637,6 +5723,46 @@ func awsAwsjson11_deserializeDocumentRepositoryCatalogData(v **types.RepositoryC return nil } +func awsAwsjson11_deserializeDocumentRepositoryCatalogDataNotFoundException(v **types.RepositoryCatalogDataNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.RepositoryCatalogDataNotFoundException + if *v == nil { + sv = &types.RepositoryCatalogDataNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeDocumentRepositoryList(v *[]types.Repository, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/doc.go index 
a17297a0..8706038f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/doc.go @@ -4,12 +4,12 @@ // Amazon Elastic Container Registry Public. // // Amazon Elastic Container Registry Public Amazon Elastic Container Registry -// (Amazon ECR) is a managed container image registry service. Amazon ECR provides -// both public and private registries to host your container images. You can use -// the familiar Docker CLI, or their preferred client, to push, pull, and manage +// Public (Amazon ECR Public) is a managed container image registry service. Amazon +// ECR provides both public and private registries to host your container images. +// You can use the Docker CLI or your preferred client to push, pull, and manage // images. Amazon ECR provides a secure, scalable, and reliable registry for your // Docker or Open Container Initiative (OCI) images. Amazon ECR supports public // repositories with this API. For information about the Amazon ECR API for private -// repositories, see Amazon Elastic Container Registry API Reference -// (https://docs.aws.amazon.com/AmazonECR/latest/APIReference/Welcome.html). +// repositories, see Amazon Elastic Container Registry API Reference (https://docs.aws.amazon.com/AmazonECR/latest/APIReference/Welcome.html) +// . package ecrpublic diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/go_module_metadata.go index 274a1789..70b89eb7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/go_module_metadata.go @@ -3,4 +3,4 @@ package ecrpublic // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.13.17" +const goModuleVersion = "1.16.2" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/internal/endpoints/endpoints.go index 356bda29..0f540b2c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/internal/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/internal/endpoints/endpoints.go @@ -89,6 +89,7 @@ var partitionRegexp = struct { AwsCn *regexp.Regexp AwsIso *regexp.Regexp AwsIsoB *regexp.Regexp + AwsIsoE *regexp.Regexp AwsUsGov *regexp.Regexp }{ @@ -96,6 +97,7 @@ var partitionRegexp = struct { AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), + AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), } @@ -134,6 +136,24 @@ var defaultPartitions = endpoints.Partitions{ }, RegionRegex: partitionRegexp.Aws, IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-east-1", + }: endpoints.Endpoint{ + Hostname: "api.ecr-public.us-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-west-2", + }: endpoints.Endpoint{ + Hostname: "api.ecr-public.us-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-2", + }, + }, + }, }, { ID: "aws-cn", @@ -212,6 +232,27 @@ var defaultPartitions = endpoints.Partitions{ RegionRegex: partitionRegexp.AwsIsoB, IsRegionalized: true, }, + { + ID: "aws-iso-e", + Defaults: 
map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "api.ecr-public-fips.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "api.ecr-public.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoE, + IsRegionalized: true, + }, { ID: "aws-us-gov", Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/types/errors.go index 485ac0c1..24f4d43c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/types/errors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/types/errors.go @@ -7,10 +7,12 @@ import ( smithy "github.com/aws/smithy-go" ) -// The specified layer upload does not contain any layer parts. +// The specified layer upload doesn't contain any layer parts. type EmptyUploadException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -23,7 +25,12 @@ func (e *EmptyUploadException) ErrorMessage() string { } return *e.Message } -func (e *EmptyUploadException) ErrorCode() string { return "EmptyUploadException" } +func (e *EmptyUploadException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "EmptyUploadException" + } + return *e.ErrorCodeOverride +} func (e *EmptyUploadException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // The specified image has already been pushed, and there were no changes to the @@ -31,6 +38,8 @@ func (e *EmptyUploadException) ErrorFault() smithy.ErrorFault { return smithy.Fa type ImageAlreadyExistsException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -43,14 +52,21 @@ func (e *ImageAlreadyExistsException) ErrorMessage() string { } return *e.Message } -func (e *ImageAlreadyExistsException) ErrorCode() string { return "ImageAlreadyExistsException" } +func (e *ImageAlreadyExistsException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ImageAlreadyExistsException" + } + return *e.ErrorCodeOverride +} func (e *ImageAlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// The specified image digest does not match the digest that Amazon ECR calculated +// The specified image digest doesn't match the digest that Amazon ECR calculated // for the image. type ImageDigestDoesNotMatchException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -64,14 +80,19 @@ func (e *ImageDigestDoesNotMatchException) ErrorMessage() string { return *e.Message } func (e *ImageDigestDoesNotMatchException) ErrorCode() string { - return "ImageDigestDoesNotMatchException" + if e == nil || e.ErrorCodeOverride == nil { + return "ImageDigestDoesNotMatchException" + } + return *e.ErrorCodeOverride } func (e *ImageDigestDoesNotMatchException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// The image requested does not exist in the specified repository. +// The image requested doesn't exist in the specified repository. 
type ImageNotFoundException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -84,7 +105,12 @@ func (e *ImageNotFoundException) ErrorMessage() string { } return *e.Message } -func (e *ImageNotFoundException) ErrorCode() string { return "ImageNotFoundException" } +func (e *ImageNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ImageNotFoundException" + } + return *e.ErrorCodeOverride +} func (e *ImageNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // The specified image is tagged with a tag that already exists. The repository is @@ -92,6 +118,8 @@ func (e *ImageNotFoundException) ErrorFault() smithy.ErrorFault { return smithy. type ImageTagAlreadyExistsException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -104,14 +132,21 @@ func (e *ImageTagAlreadyExistsException) ErrorMessage() string { } return *e.Message } -func (e *ImageTagAlreadyExistsException) ErrorCode() string { return "ImageTagAlreadyExistsException" } +func (e *ImageTagAlreadyExistsException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ImageTagAlreadyExistsException" + } + return *e.ErrorCodeOverride +} func (e *ImageTagAlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// The layer digest calculation performed by Amazon ECR upon receipt of the image -// layer does not match the digest specified. +// The layer digest calculation performed by Amazon ECR when the image layer +// doesn't match the digest specified. type InvalidLayerException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -124,14 +159,21 @@ func (e *InvalidLayerException) ErrorMessage() string { } return *e.Message } -func (e *InvalidLayerException) ErrorCode() string { return "InvalidLayerException" } +func (e *InvalidLayerException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidLayerException" + } + return *e.ErrorCodeOverride +} func (e *InvalidLayerException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// The layer part size is not valid, or the first byte specified is not consecutive +// The layer part size isn't valid, or the first byte specified isn't consecutive // to the last byte of a previous layer part upload. type InvalidLayerPartException struct { Message *string + ErrorCodeOverride *string + RegistryId *string RepositoryName *string UploadId *string @@ -149,7 +191,12 @@ func (e *InvalidLayerPartException) ErrorMessage() string { } return *e.Message } -func (e *InvalidLayerPartException) ErrorCode() string { return "InvalidLayerPartException" } +func (e *InvalidLayerPartException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidLayerPartException" + } + return *e.ErrorCodeOverride +} func (e *InvalidLayerPartException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // The specified parameter is invalid. 
Review the available parameters for the API @@ -157,6 +204,8 @@ func (e *InvalidLayerPartException) ErrorFault() smithy.ErrorFault { return smit type InvalidParameterException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -169,7 +218,12 @@ func (e *InvalidParameterException) ErrorMessage() string { } return *e.Message } -func (e *InvalidParameterException) ErrorCode() string { return "InvalidParameterException" } +func (e *InvalidParameterException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidParameterException" + } + return *e.ErrorCodeOverride +} func (e *InvalidParameterException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // An invalid parameter has been specified. Tag keys can have a maximum character @@ -178,6 +232,8 @@ func (e *InvalidParameterException) ErrorFault() smithy.ErrorFault { return smit type InvalidTagParameterException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -190,13 +246,20 @@ func (e *InvalidTagParameterException) ErrorMessage() string { } return *e.Message } -func (e *InvalidTagParameterException) ErrorCode() string { return "InvalidTagParameterException" } +func (e *InvalidTagParameterException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidTagParameterException" + } + return *e.ErrorCodeOverride +} func (e *InvalidTagParameterException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // The image layer already exists in the associated repository. type LayerAlreadyExistsException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -209,13 +272,20 @@ func (e *LayerAlreadyExistsException) ErrorMessage() string { } return *e.Message } -func (e *LayerAlreadyExistsException) ErrorCode() string { return "LayerAlreadyExistsException" } +func (e *LayerAlreadyExistsException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "LayerAlreadyExistsException" + } + return *e.ErrorCodeOverride +} func (e *LayerAlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // Layer parts must be at least 5 MiB in size. type LayerPartTooSmallException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -228,14 +298,21 @@ func (e *LayerPartTooSmallException) ErrorMessage() string { } return *e.Message } -func (e *LayerPartTooSmallException) ErrorCode() string { return "LayerPartTooSmallException" } +func (e *LayerPartTooSmallException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "LayerPartTooSmallException" + } + return *e.ErrorCodeOverride +} func (e *LayerPartTooSmallException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// The specified layers could not be found, or the specified layer is not valid for +// The specified layers can't be found, or the specified layer isn't valid for // this repository. 
type LayersNotFoundException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -248,16 +325,22 @@ func (e *LayersNotFoundException) ErrorMessage() string { } return *e.Message } -func (e *LayersNotFoundException) ErrorCode() string { return "LayersNotFoundException" } +func (e *LayersNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "LayersNotFoundException" + } + return *e.ErrorCodeOverride +} func (e *LayersNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// The operation did not succeed because it would have exceeded a service limit for -// your account. For more information, see Amazon ECR Service Quotas -// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html) in -// the Amazon Elastic Container Registry User Guide. +// The operation didn't succeed because it would have exceeded a service limit for +// your account. For more information, see Amazon ECR Service Quotas (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html) +// in the Amazon Elastic Container Registry User Guide. type LimitExceededException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -270,13 +353,20 @@ func (e *LimitExceededException) ErrorMessage() string { } return *e.Message } -func (e *LimitExceededException) ErrorCode() string { return "LimitExceededException" } +func (e *LimitExceededException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "LimitExceededException" + } + return *e.ErrorCodeOverride +} func (e *LimitExceededException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// The manifest list is referencing an image that does not exist. +// The manifest list is referencing an image that doesn't exist. type ReferencedImagesNotFoundException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -290,14 +380,19 @@ func (e *ReferencedImagesNotFoundException) ErrorMessage() string { return *e.Message } func (e *ReferencedImagesNotFoundException) ErrorCode() string { - return "ReferencedImagesNotFoundException" + if e == nil || e.ErrorCodeOverride == nil { + return "ReferencedImagesNotFoundException" + } + return *e.ErrorCodeOverride } func (e *ReferencedImagesNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// The registry does not exist. +// The registry doesn't exist. type RegistryNotFoundException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -310,13 +405,20 @@ func (e *RegistryNotFoundException) ErrorMessage() string { } return *e.Message } -func (e *RegistryNotFoundException) ErrorCode() string { return "RegistryNotFoundException" } +func (e *RegistryNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "RegistryNotFoundException" + } + return *e.ErrorCodeOverride +} func (e *RegistryNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // The specified repository already exists in the specified registry. 
type RepositoryAlreadyExistsException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -330,15 +432,48 @@ func (e *RepositoryAlreadyExistsException) ErrorMessage() string { return *e.Message } func (e *RepositoryAlreadyExistsException) ErrorCode() string { - return "RepositoryAlreadyExistsException" + if e == nil || e.ErrorCodeOverride == nil { + return "RepositoryAlreadyExistsException" + } + return *e.ErrorCodeOverride } func (e *RepositoryAlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } +// The repository catalog data doesn't exist. +type RepositoryCatalogDataNotFoundException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *RepositoryCatalogDataNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *RepositoryCatalogDataNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *RepositoryCatalogDataNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "RepositoryCatalogDataNotFoundException" + } + return *e.ErrorCodeOverride +} +func (e *RepositoryCatalogDataNotFoundException) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} + // The specified repository contains images. To delete a repository that contains // images, you must force the deletion with the force parameter. type RepositoryNotEmptyException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -351,15 +486,21 @@ func (e *RepositoryNotEmptyException) ErrorMessage() string { } return *e.Message } -func (e *RepositoryNotEmptyException) ErrorCode() string { return "RepositoryNotEmptyException" } +func (e *RepositoryNotEmptyException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "RepositoryNotEmptyException" + } + return *e.ErrorCodeOverride +} func (e *RepositoryNotEmptyException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// The specified repository could not be found. Check the spelling of the specified -// repository and ensure that you are performing operations on the correct -// registry. +// The specified repository can't be found. Check the spelling of the specified +// repository and ensure that you're performing operations on the correct registry. type RepositoryNotFoundException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -372,14 +513,21 @@ func (e *RepositoryNotFoundException) ErrorMessage() string { } return *e.Message } -func (e *RepositoryNotFoundException) ErrorCode() string { return "RepositoryNotFoundException" } +func (e *RepositoryNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "RepositoryNotFoundException" + } + return *e.ErrorCodeOverride +} func (e *RepositoryNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// The specified repository and registry combination does not have an associated +// The specified repository and registry combination doesn't have an associated // repository policy. 
type RepositoryPolicyNotFoundException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -393,7 +541,10 @@ func (e *RepositoryPolicyNotFoundException) ErrorMessage() string { return *e.Message } func (e *RepositoryPolicyNotFoundException) ErrorCode() string { - return "RepositoryPolicyNotFoundException" + if e == nil || e.ErrorCodeOverride == nil { + return "RepositoryPolicyNotFoundException" + } + return *e.ErrorCodeOverride } func (e *RepositoryPolicyNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } @@ -401,6 +552,8 @@ func (e *RepositoryPolicyNotFoundException) ErrorFault() smithy.ErrorFault { ret type ServerException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -413,14 +566,21 @@ func (e *ServerException) ErrorMessage() string { } return *e.Message } -func (e *ServerException) ErrorCode() string { return "ServerException" } +func (e *ServerException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ServerException" + } + return *e.ErrorCodeOverride +} func (e *ServerException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer } -// The list of tags on the repository is over the limit. The maximum number of tags -// that can be applied to a repository is 50. +// The list of tags on the repository is over the limit. The maximum number of +// tags that can be applied to a repository is 50. type TooManyTagsException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -433,13 +593,20 @@ func (e *TooManyTagsException) ErrorMessage() string { } return *e.Message } -func (e *TooManyTagsException) ErrorCode() string { return "TooManyTagsException" } +func (e *TooManyTagsException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "TooManyTagsException" + } + return *e.ErrorCodeOverride +} func (e *TooManyTagsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// The action is not supported in this Region. +// The action isn't supported in this Region. type UnsupportedCommandException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -452,14 +619,21 @@ func (e *UnsupportedCommandException) ErrorMessage() string { } return *e.Message } -func (e *UnsupportedCommandException) ErrorCode() string { return "UnsupportedCommandException" } +func (e *UnsupportedCommandException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "UnsupportedCommandException" + } + return *e.ErrorCodeOverride +} func (e *UnsupportedCommandException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// The upload could not be found, or the specified upload ID is not valid for this +// The upload can't be found, or the specified upload ID isn't valid for this // repository. 
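A minimal usage sketch (not part of the vendored patch) of the nil-safe ErrorCode methods that this bump adds to the ecrpublic error structs, assuming only the types shown in the hunks above plus the standard aws pointer helpers; the override value below is a placeholder:

package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecrpublic/types"
)

func main() {
	// Without an override, ErrorCode falls back to the modeled code; the added
	// nil check also makes the call safe on a nil receiver.
	plain := &types.UploadNotFoundException{}
	fmt.Println(plain.ErrorCode()) // UploadNotFoundException

	// With an override (for example, a sanitized code taken from the
	// X-Amzn-ErrorType header by the regenerated deserializers), ErrorCode
	// returns the overridden value instead. The literal here is illustrative.
	overridden := &types.RepositoryCatalogDataNotFoundException{
		ErrorCodeOverride: aws.String("RepositoryCatalogDataNotFound"),
		Message:           aws.String("catalog data missing"),
	}
	fmt.Println(overridden.ErrorCode(), overridden.ErrorMessage())

	// Callers still match on the concrete error type with errors.As.
	var notFound *types.RepositoryCatalogDataNotFoundException
	var err error = overridden
	fmt.Println(errors.As(err, &notFound)) // true
}
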
type UploadNotFoundException struct { Message *string + ErrorCodeOverride *string + noSmithyDocumentSerde } @@ -472,5 +646,10 @@ func (e *UploadNotFoundException) ErrorMessage() string { } return *e.Message } -func (e *UploadNotFoundException) ErrorCode() string { return "UploadNotFoundException" } +func (e *UploadNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "UploadNotFoundException" + } + return *e.ErrorCodeOverride +} func (e *UploadNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/types/types.go index 731ea603..b24a2f4e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/types/types.go @@ -10,9 +10,9 @@ import ( // An authorization token data object that corresponds to a public registry. type AuthorizationData struct { - // A base64-encoded string that contains authorization data for a public Amazon ECR - // registry. When the string is decoded, it is presented in the format - // user:password for public registry authentication using docker login. + // A base64-encoded string that contains authorization data for a public Amazon + // ECR registry. When the string is decoded, it's presented in the format + // user:password for public registry authentication using docker login . AuthorizationToken *string // The Unix time in seconds and milliseconds when the authorization token expires. @@ -22,28 +22,29 @@ type AuthorizationData struct { noSmithyDocumentSerde } -// An object representing an Amazon ECR image. +// An object that represents an Amazon ECR image. type Image struct { - // An object containing the image tag and image digest associated with an image. + // An object that contains the image tag and image digest associated with an image. ImageId *ImageIdentifier - // The image manifest associated with the image. + // The image manifest that's associated with the image. ImageManifest *string // The manifest media type of the image. ImageManifestMediaType *string - // The AWS account ID associated with the registry containing the image. + // The Amazon Web Services account ID that's associated with the registry + // containing the image. RegistryId *string - // The name of the repository associated with the image. + // The name of the repository that's associated with the image. RepositoryName *string noSmithyDocumentSerde } -// An object that describes an image returned by a DescribeImages operation. +// An object that describes an image that's returned by a DescribeImages operation. type ImageDetail struct { // The artifact media type of the image. @@ -55,41 +56,41 @@ type ImageDetail struct { // The media type of the image manifest. ImageManifestMediaType *string - // The date and time, expressed in standard JavaScript date format, at which the - // current image was pushed to the repository. + // The date and time, expressed in standard JavaScript date format, that the + // current image was pushed to the repository at. ImagePushedAt *time.Time // The size, in bytes, of the image in the repository. If the image is a manifest - // list, this will be the max size of all manifests in the list. Beginning with - // Docker version 1.9, the Docker client compresses image layers before pushing - // them to a V2 Docker registry. 
The output of the docker images command shows the - // uncompressed image size, so it may return a larger image size than the image - // sizes returned by DescribeImages. + // list, this is the max size of all manifests in the list. Beginning with Docker + // version 1.9, the Docker client compresses image layers before pushing them to a + // V2 Docker registry. The output of the docker images command shows the + // uncompressed image size, so it might return a larger image size than the image + // sizes that are returned by DescribeImages . ImageSizeInBytes *int64 - // The list of tags associated with this image. + // The list of tags that's associated with this image. ImageTags []string - // The AWS account ID associated with the public registry to which this image - // belongs. + // The Amazon Web Services account ID that's associated with the public registry + // where this image belongs. RegistryId *string - // The name of the repository to which this image belongs. + // The name of the repository where this image belongs. RepositoryName *string noSmithyDocumentSerde } -// An object representing an Amazon ECR image failure. +// An object that represents an Amazon ECR image failure. type ImageFailure struct { - // The code associated with the failure. + // The code that's associated with the failure. FailureCode ImageFailureCode // The reason for the failure. FailureReason *string - // The image ID associated with the failure. + // The image ID that's associated with the failure. ImageId *ImageIdentifier noSmithyDocumentSerde @@ -101,28 +102,28 @@ type ImageIdentifier struct { // The sha256 digest of the image manifest. ImageDigest *string - // The tag used for the image. + // The tag that's used for the image. ImageTag *string noSmithyDocumentSerde } -// An object representing the image tag details for an image. +// An object that represents the image tag details for an image. type ImageTagDetail struct { - // The time stamp indicating when the image tag was created. + // The time stamp that indicates when the image tag was created. CreatedAt *time.Time // An object that describes the details of an image. ImageDetail *ReferencedImageDetail - // The tag associated with the image. + // The tag that's associated with the image. ImageTag *string noSmithyDocumentSerde } -// An object representing an Amazon ECR image layer. +// An object that represents an Amazon ECR image layer. type Layer struct { // The availability status of the image layer. @@ -136,29 +137,29 @@ type Layer struct { // The media type of the layer, such as // application/vnd.docker.image.rootfs.diff.tar.gzip or - // application/vnd.oci.image.layer.v1.tar+gzip. + // application/vnd.oci.image.layer.v1.tar+gzip . MediaType *string noSmithyDocumentSerde } -// An object representing an Amazon ECR image layer failure. +// An object that represents an Amazon ECR image layer failure. type LayerFailure struct { - // The failure code associated with the failure. + // The failure code that's associated with the failure. FailureCode LayerFailureCode // The reason for the failure. FailureReason *string - // The layer digest associated with the failure. + // The layer digest that's associated with the failure. LayerDigest *string noSmithyDocumentSerde } -// An object that describes the image tag details returned by a DescribeImageTags -// action. +// An object that describes the image tag details that are returned by a +// DescribeImageTags action. type ReferencedImageDetail struct { // The artifact media type of the image. 
@@ -170,16 +171,16 @@ type ReferencedImageDetail struct { // The media type of the image manifest. ImageManifestMediaType *string - // The date and time, expressed in standard JavaScript date format, at which the - // current image tag was pushed to the repository. + // The date and time, expressed in standard JavaScript date format, which the + // current image tag was pushed to the repository at. ImagePushedAt *time.Time // The size, in bytes, of the image in the repository. If the image is a manifest - // list, this will be the max size of all manifests in the list. Beginning with - // Docker version 1.9, the Docker client compresses image layers before pushing - // them to a V2 Docker registry. The output of the docker images command shows the - // uncompressed image size, so it may return a larger image size than the image - // sizes returned by DescribeImages. + // list, this is the max size of all manifests in the list. Beginning with Docker + // version 1.9, the Docker client compresses image layers before pushing them to a + // V2 Docker registry. The output of the docker images command shows the + // uncompressed image size, so it might return a larger image size than the image + // sizes that are returned by DescribeImages . ImageSizeInBytes *int64 noSmithyDocumentSerde @@ -188,7 +189,7 @@ type ReferencedImageDetail struct { // The details of a public registry. type Registry struct { - // An array of objects representing the aliases for a public registry. + // An array of objects that represents the aliases for a public registry. // // This member is required. Aliases []RegistryAlias @@ -198,8 +199,8 @@ type Registry struct { // This member is required. RegistryArn *string - // The AWS account ID associated with the registry. If you do not specify a - // registry, the default public registry is assumed. + // The Amazon Web Services account ID that's associated with the registry. If you + // do not specify a registry, the default public registry is assumed. // // This member is required. RegistryId *string @@ -210,9 +211,9 @@ type Registry struct { // This member is required. RegistryUri *string - // Whether the account is verified. This indicates whether the account is an AWS - // Marketplace vendor. If an account is verified, each public repository will - // received a verified account badge on the Amazon ECR Public Gallery. + // Indicates whether the account is a verified Amazon Web Services Marketplace + // vendor. If an account is verified, each public repository receives a verified + // account badge on the Amazon ECR Public Gallery. // // This member is required. Verified *bool @@ -221,14 +222,13 @@ type Registry struct { } // An object representing the aliases for a public registry. A public registry is -// given an alias upon creation but a custom alias can be set using the Amazon ECR -// console. For more information, see Registries -// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/Registries.html) in the -// Amazon Elastic Container Registry User Guide. +// given an alias when it's created. However, a custom alias can be set using the +// Amazon ECR console. For more information, see Registries (https://docs.aws.amazon.com/AmazonECR/latest/userguide/Registries.html) +// in the Amazon Elastic Container Registry User Guide. type RegistryAlias struct { - // Whether or not the registry alias is the default alias for the registry. 
When - // the first public repository is created, your public registry is assigned a + // Indicates whether the registry alias is the default alias for the registry. + // When the first public repository is created, your public registry is assigned a // default registry alias. // // This member is required. @@ -239,10 +239,10 @@ type RegistryAlias struct { // This member is required. Name *string - // Whether or not the registry alias is the primary alias for the registry. If + // Indicates whether the registry alias is the primary alias for the registry. If // true, the alias is the primary registry alias and is displayed in both the // repository URL and the image URI used in the docker pull commands on the Amazon - // ECR Public Gallery. A registry alias that is not the primary registry alias can + // ECR Public Gallery. A registry alias that isn't the primary registry alias can // be used in the repository URI in a docker pull command. // // This member is required. @@ -273,14 +273,14 @@ type Repository struct { // The date and time, in JavaScript date format, when the repository was created. CreatedAt *time.Time - // The AWS account ID associated with the public registry that contains the - // repository. + // The Amazon Web Services account ID that's associated with the public registry + // that contains the repository. RegistryId *string // The Amazon Resource Name (ARN) that identifies the repository. The ARN contains - // the arn:aws:ecr namespace, followed by the region of the repository, AWS account - // ID of the repository owner, repository namespace, and repository name. For - // example, arn:aws:ecr:region:012345678910:repository/test. + // the arn:aws:ecr namespace, followed by the region of the repository, Amazon Web + // Services account ID of the repository owner, repository namespace, and + // repository name. For example, arn:aws:ecr:region:012345678910:repository/test . RepositoryArn *string // The name of the repository. @@ -297,27 +297,28 @@ type Repository struct { // ECR Public Gallery. type RepositoryCatalogData struct { - // The longform description of the contents of the repository. This text appears in - // the repository details on the Amazon ECR Public Gallery. + // The longform description of the contents of the repository. This text appears + // in the repository details on the Amazon ECR Public Gallery. AboutText *string // The architecture tags that are associated with the repository. Only supported // operating system tags appear publicly in the Amazon ECR Public Gallery. For more - // information, see RepositoryCatalogDataInput. + // information, see RepositoryCatalogDataInput . Architectures []string // The short description of the repository. Description *string - // The URL containing the logo associated with the repository. + // The URL that contains the logo that's associated with the repository. LogoUrl *string - // Whether or not the repository is certified by AWS Marketplace. + // Indicates whether the repository is certified by Amazon Web Services + // Marketplace. MarketplaceCertified *bool // The operating system tags that are associated with the repository. Only // supported operating system tags appear publicly in the Amazon ECR Public - // Gallery. For more information, see RepositoryCatalogDataInput. + // Gallery. For more information, see RepositoryCatalogDataInput . OperatingSystems []string // The longform usage details of the contents of the repository. 
The usage text @@ -327,56 +328,46 @@ type RepositoryCatalogData struct { noSmithyDocumentSerde } -// An object containing the catalog data for a repository. This data is publicly -// visible in the Amazon ECR Public Gallery. +// An object that contains the catalog data for a repository. This data is +// publicly visible in the Amazon ECR Public Gallery. type RepositoryCatalogDataInput struct { - // A detailed description of the contents of the repository. It is publicly visible + // A detailed description of the contents of the repository. It's publicly visible // in the Amazon ECR Public Gallery. The text must be in markdown format. AboutText *string // The system architecture that the images in the repository are compatible with. - // On the Amazon ECR Public Gallery, the following supported architectures will - // appear as badges on the repository and are used as search filters. - // - // * Linux - // - // * - // Windows - // - // If an unsupported tag is added to your repository catalog data, it will - // be associated with the repository and can be retrieved using the API but will - // not be discoverable in the Amazon ECR Public Gallery. + // On the Amazon ECR Public Gallery, the following supported architectures appear + // as badges on the repository and are used as search filters. If an unsupported + // tag is added to your repository catalog data, it's associated with the + // repository and can be retrieved using the API but isn't discoverable in the + // Amazon ECR Public Gallery. + // - ARM + // - ARM 64 + // - x86 + // - x86-64 Architectures []string - // A short description of the contents of the repository. This text appears in both - // the image details and also when searching for repositories on the Amazon ECR - // Public Gallery. + // A short description of the contents of the repository. This text appears in + // both the image details and also when searching for repositories on the Amazon + // ECR Public Gallery. Description *string - // The base64-encoded repository logo payload. The repository logo is only publicly - // visible in the Amazon ECR Public Gallery for verified accounts. + // The base64-encoded repository logo payload. The repository logo is only + // publicly visible in the Amazon ECR Public Gallery for verified accounts. LogoImageBlob []byte // The operating systems that the images in the repository are compatible with. On - // the Amazon ECR Public Gallery, the following supported operating systems will - // appear as badges on the repository and are used as search filters. - // - // * ARM - // - // * ARM - // 64 - // - // * x86 - // - // * x86-64 - // - // If an unsupported tag is added to your repository catalog - // data, it will be associated with the repository and can be retrieved using the - // API but will not be discoverable in the Amazon ECR Public Gallery. + // the Amazon ECR Public Gallery, the following supported operating systems appear + // as badges on the repository and are used as search filters. If an unsupported + // tag is added to your repository catalog data, it's associated with the + // repository and can be retrieved using the API but isn't discoverable in the + // Amazon ECR Public Gallery. + // - Linux + // - Windows OperatingSystems []string - // Detailed information on how to use the contents of the repository. It is + // Detailed information about how to use the contents of the repository. It's // publicly visible in the Amazon ECR Public Gallery. 
The usage text provides // context, support information, and additional usage details for users of the // repository. The text must be in markdown format. @@ -386,9 +377,9 @@ type RepositoryCatalogDataInput struct { } // The metadata that you apply to a resource to help you categorize and organize -// them. Each tag consists of a key and an optional value, both of which you -// define. Tag keys can have a maximum character length of 128 characters, and tag -// values can have a maximum length of 256 characters. +// them. Each tag consists of a key and an optional value. You define both. Tag +// keys can have a maximum character length of 128 characters, and tag values can +// have a maximum length of 256 characters. type Tag struct { // One part of a key-value pair that make up a tag. A key is a general label that diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md index ae9ae243..c60858a1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md @@ -1,3 +1,31 @@ +# v1.9.28 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.27 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.26 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.25 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.24 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.23 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.22 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.9.21 (2022-12-15) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go index c49853b9..ea2621e4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go @@ -3,4 +3,4 @@ package presignedurl // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.9.21" +const goModuleVersion = "1.9.28" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/CHANGELOG.md index 77368fac..d713c5ab 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/CHANGELOG.md @@ -1,3 +1,72 @@ +# v1.22.2 (2023-06-15) + +* No change notes available for this release. + +# v1.22.1 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.0 (2023-06-05) + +* **Feature**: This release includes feature to import customer's asymmetric (RSA and ECC) and HMAC keys into KMS. It also includes feature to allow customers to specify number of days to schedule a KMS key deletion as a policy condition key. + +# v1.21.1 (2023-05-04) + +* No change notes available for this release. 
+ +# v1.21.0 (2023-05-01) + +* **Feature**: This release makes the NitroEnclave request parameter Recipient and the response field for CiphertextForRecipient available in AWS SDKs. It also adds the regex pattern for CloudHsmClusterId validation. + +# v1.20.12 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.11 (2023-04-20) + +* No change notes available for this release. + +# v1.20.10 (2023-04-10) + +* No change notes available for this release. + +# v1.20.9 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.8 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.7 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.6 (2023-02-28) + +* **Documentation**: AWS KMS is deprecating the RSAES_PKCS1_V1_5 wrapping algorithm option in the GetParametersForImport API that is used in the AWS KMS Import Key Material feature. AWS KMS will end support for this wrapping algorithm by October 1, 2023. + +# v1.20.5 (2023-02-22) + +* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. + +# v1.20.4 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.3 (2023-02-15) + +* **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. +* **Bug Fix**: Correct error type parsing for restJson services. + +# v1.20.2 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.1 (2023-01-23) + +* No change notes available for this release. + # v1.20.0 (2023-01-05) * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_client.go index e56ae816..47a7a75a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_client.go @@ -115,7 +115,7 @@ type Options struct { Retryer aws.Retryer // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set - // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig. You + // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You // should not populate this structure programmatically, or rely on the values here // within your applications. RuntimeEnvironment aws.RuntimeEnvironment diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CancelKeyDeletion.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CancelKeyDeletion.go index ee03cf10..e43dce20 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CancelKeyDeletion.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CancelKeyDeletion.go @@ -11,17 +11,15 @@ import ( ) // Cancels the deletion of a KMS key. When this operation succeeds, the key state -// of the KMS key is Disabled. To enable the KMS key, use EnableKey. For more +// of the KMS key is Disabled . To enable the KMS key, use EnableKey . For more // information about scheduling and canceling deletion of a KMS key, see Deleting -// KMS keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/deleting-keys.html) in -// the Key Management Service Developer Guide. 
The KMS key that you use for this +// KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/deleting-keys.html) +// in the Key Management Service Developer Guide. The KMS key that you use for this // operation must be in a compatible key state. For details, see Key states of KMS // keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in // the Key Management Service Developer Guide. Cross-account use: No. You cannot // perform this operation on a KMS key in a different Amazon Web Services account. -// Required permissions: kms:CancelKeyDeletion -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// Required permissions: kms:CancelKeyDeletion (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (key policy) Related operations: ScheduleKeyDeletion func (c *Client) CancelKeyDeletion(ctx context.Context, params *CancelKeyDeletionInput, optFns ...func(*Options)) (*CancelKeyDeletionOutput, error) { if params == nil { @@ -42,15 +40,10 @@ type CancelKeyDeletionInput struct { // Identifies the KMS key whose deletion is being canceled. Specify the key ID or // key ARN of the KMS key. For example: - // - // * Key ID: - // 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: - // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To - // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey . // // This member is required. KeyId *string @@ -60,9 +53,8 @@ type CancelKeyDeletionInput struct { type CancelKeyDeletionOutput struct { - // The Amazon Resource Name (key ARN - // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) - // of the KMS key whose deletion is canceled. + // The Amazon Resource Name ( key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN) + // ) of the KMS key whose deletion is canceled. KeyId *string // Metadata pertaining to the operation's result. @@ -122,6 +114,9 @@ func (c *Client) addOperationCancelKeyDeletionMiddlewares(stack *middleware.Stac if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCancelKeyDeletion(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ConnectCustomKeyStore.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ConnectCustomKeyStore.go index 19fd8cad..a88af316 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ConnectCustomKeyStore.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ConnectCustomKeyStore.go @@ -10,8 +10,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Connects or reconnects a custom key store -// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) +// Connects or reconnects a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) // to its backing key store. For an CloudHSM key store, ConnectCustomKeyStore // connects the key store to its associated CloudHSM cluster. 
For an external key // store, ConnectCustomKeyStore connects the key store to the external key store @@ -24,14 +23,13 @@ import ( // HTTP 200 response and a JSON object with no properties. However, this response // does not indicate that the custom key store is connected. To get the connection // state of the custom key store, use the DescribeCustomKeyStores operation. This -// operation is part of the custom key stores -// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) +// operation is part of the custom key stores (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) // feature in KMS, which combines the convenience and extensive integration of KMS // with the isolation and control of a key store that you own and manage. The // ConnectCustomKeyStore operation might fail for various reasons. To find the // reason, use the DescribeCustomKeyStores operation and see the // ConnectionErrorCode in the response. For help interpreting the -// ConnectionErrorCode, see CustomKeyStoresListEntry. To fix the failure, use the +// ConnectionErrorCode , see CustomKeyStoresListEntry . To fix the failure, use the // DisconnectCustomKeyStore operation to disconnect the custom key store, correct // the error, use the UpdateCustomKeyStore operation if necessary, and then use // ConnectCustomKeyStore again. CloudHSM key store During the connection process @@ -40,47 +38,34 @@ import ( // the cluster, logs into the CloudHSM client as the kmsuser CU, and rotates its // password. To connect an CloudHSM key store, its associated CloudHSM cluster must // have at least one active HSM. To get the number of active HSMs in a cluster, use -// the DescribeClusters -// (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) -// operation. To add HSMs to the cluster, use the CreateHsm -// (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html) -// operation. Also, the kmsuser crypto user -// (https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser) +// the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) +// operation. To add HSMs to the cluster, use the CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html) +// operation. Also, the kmsuser crypto user (https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser) // (CU) must not be logged into the cluster. This prevents KMS from using this // account to log in. If you are having trouble connecting or disconnecting a -// CloudHSM key store, see Troubleshooting an CloudHSM key store -// (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html) in the -// Key Management Service Developer Guide. External key store When you connect an -// external key store that uses public endpoint connectivity, KMS tests its ability -// to communicate with your external key manager by sending a request via the -// external key store proxy. When you connect to an external key store that uses -// VPC endpoint service connectivity, KMS establishes the networking elements that -// it needs to communicate with your external key manager via the external key +// CloudHSM key store, see Troubleshooting an CloudHSM key store (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html) +// in the Key Management Service Developer Guide. 
External key store When you +// connect an external key store that uses public endpoint connectivity, KMS tests +// its ability to communicate with your external key manager by sending a request +// via the external key store proxy. When you connect to an external key store that +// uses VPC endpoint service connectivity, KMS establishes the networking elements +// that it needs to communicate with your external key manager via the external key // store proxy. This includes creating an interface endpoint to the VPC endpoint // service and a private hosted zone for traffic between KMS and the VPC endpoint // service. To connect an external key store, KMS must be able to connect to the // external key store proxy, the external key store proxy must be able to // communicate with your external key manager, and the external key manager must be // available for cryptographic operations. If you are having trouble connecting or -// disconnecting an external key store, see Troubleshooting an external key store -// (https://docs.aws.amazon.com/kms/latest/developerguide/xks-troubleshooting.html) +// disconnecting an external key store, see Troubleshooting an external key store (https://docs.aws.amazon.com/kms/latest/developerguide/xks-troubleshooting.html) // in the Key Management Service Developer Guide. Cross-account use: No. You cannot // perform this operation on a custom key store in a different Amazon Web Services -// account. Required permissions: kms:ConnectCustomKeyStore -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// account. Required permissions: kms:ConnectCustomKeyStore (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (IAM policy) Related operations -// -// * CreateCustomKeyStore -// -// * -// DeleteCustomKeyStore -// -// * DescribeCustomKeyStores -// -// * DisconnectCustomKeyStore -// -// * -// UpdateCustomKeyStore +// - CreateCustomKeyStore +// - DeleteCustomKeyStore +// - DescribeCustomKeyStores +// - DisconnectCustomKeyStore +// - UpdateCustomKeyStore func (c *Client) ConnectCustomKeyStore(ctx context.Context, params *ConnectCustomKeyStoreInput, optFns ...func(*Options)) (*ConnectCustomKeyStoreOutput, error) { if params == nil { params = &ConnectCustomKeyStoreInput{} @@ -98,8 +83,8 @@ func (c *Client) ConnectCustomKeyStore(ctx context.Context, params *ConnectCusto type ConnectCustomKeyStoreInput struct { - // Enter the key store ID of the custom key store that you want to connect. To find - // the ID of a custom key store, use the DescribeCustomKeyStores operation. + // Enter the key store ID of the custom key store that you want to connect. To + // find the ID of a custom key store, use the DescribeCustomKeyStores operation. // // This member is required. CustomKeyStoreId *string @@ -165,6 +150,9 @@ func (c *Client) addOperationConnectCustomKeyStoreMiddlewares(stack *middleware. 
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opConnectCustomKeyStore(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateAlias.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateAlias.go index a4618675..0030cb49 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateAlias.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateAlias.go @@ -11,48 +11,36 @@ import ( ) // Creates a friendly name for a KMS key. Adding, deleting, or updating an alias -// can allow or deny permission to the KMS key. For details, see ABAC for KMS -// (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) in the Key -// Management Service Developer Guide. You can use an alias to identify a KMS key -// in the KMS console, in the DescribeKey operation and in cryptographic operations -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations), -// such as Encrypt and GenerateDataKey. You can also change the KMS key that's -// associated with the alias (UpdateAlias) or delete the alias (DeleteAlias) at any -// time. These operations don't affect the underlying KMS key. You can associate -// the alias with any customer managed key in the same Amazon Web Services Region. -// Each alias is associated with only one KMS key at a time, but a KMS key can have -// multiple aliases. A valid KMS key is required. You can't create an alias without -// a KMS key. The alias must be unique in the account and Region, but you can have -// aliases with the same name in different Regions. For detailed information about -// aliases, see Using aliases -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html) in the -// Key Management Service Developer Guide. This operation does not return a +// can allow or deny permission to the KMS key. For details, see ABAC for KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) +// in the Key Management Service Developer Guide. You can use an alias to identify +// a KMS key in the KMS console, in the DescribeKey operation and in cryptographic +// operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) +// , such as Encrypt and GenerateDataKey . You can also change the KMS key that's +// associated with the alias ( UpdateAlias ) or delete the alias ( DeleteAlias ) at +// any time. These operations don't affect the underlying KMS key. You can +// associate the alias with any customer managed key in the same Amazon Web +// Services Region. Each alias is associated with only one KMS key at a time, but a +// KMS key can have multiple aliases. A valid KMS key is required. You can't create +// an alias without a KMS key. The alias must be unique in the account and Region, +// but you can have aliases with the same name in different Regions. For detailed +// information about aliases, see Using aliases (https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html) +// in the Key Management Service Developer Guide. This operation does not return a // response. To get the alias that you created, use the ListAliases operation. The // KMS key that you use for this operation must be in a compatible key state. 
For -// details, see Key states of KMS keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the -// Key Management Service Developer Guide. Cross-account use: No. You cannot +// details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) +// in the Key Management Service Developer Guide. Cross-account use: No. You cannot // perform this operation on an alias in a different Amazon Web Services account. // Required permissions +// - kms:CreateAlias (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// on the alias (IAM policy). +// - kms:CreateAlias (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// on the KMS key (key policy). // -// * kms:CreateAlias -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// on the alias (IAM policy). -// -// * kms:CreateAlias -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// on the KMS key (key policy). -// -// For details, see Controlling access to aliases -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html#alias-access) +// For details, see Controlling access to aliases (https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html#alias-access) // in the Key Management Service Developer Guide. Related operations: -// -// * -// DeleteAlias -// -// * ListAliases -// -// * UpdateAlias +// - DeleteAlias +// - ListAliases +// - UpdateAlias func (c *Client) CreateAlias(ctx context.Context, params *CreateAliasInput, optFns ...func(*Options)) (*CreateAliasOutput, error) { if params == nil { params = &CreateAliasInput{} @@ -71,32 +59,27 @@ func (c *Client) CreateAlias(ctx context.Context, params *CreateAliasInput, optF type CreateAliasInput struct { // Specifies the alias name. This value must begin with alias/ followed by a name, - // such as alias/ExampleAlias. The AliasName value must be string of 1-256 + // such as alias/ExampleAlias . Do not include confidential or sensitive + // information in this field. This field may be displayed in plaintext in + // CloudTrail logs and other output. The AliasName value must be string of 1-256 // characters. It can contain only alphanumeric characters, forward slashes (/), - // underscores (_), and dashes (-). The alias name cannot begin with alias/aws/. - // The alias/aws/ prefix is reserved for Amazon Web Services managed keys - // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk). + // underscores (_), and dashes (-). The alias name cannot begin with alias/aws/ . + // The alias/aws/ prefix is reserved for Amazon Web Services managed keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk) + // . // // This member is required. AliasName *string - // Associates the alias with the specified customer managed key - // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk). - // The KMS key must be in the same Amazon Web Services Region. A valid key ID is + // Associates the alias with the specified customer managed key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk) + // . The KMS key must be in the same Amazon Web Services Region. A valid key ID is // required. If you supply a null or empty string value, this operation returns an - // error. 
For help finding the key ID and ARN, see Finding the Key ID and ARN - // (https://docs.aws.amazon.com/kms/latest/developerguide/viewing-keys.html#find-cmk-id-arn) + // error. For help finding the key ID and ARN, see Finding the Key ID and ARN (https://docs.aws.amazon.com/kms/latest/developerguide/viewing-keys.html#find-cmk-id-arn) // in the Key Management Service Developer Guide . Specify the key ID or key ARN of // the KMS key. For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key - // ARN: - // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To - // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey . // // This member is required. TargetKeyId *string @@ -162,6 +145,9 @@ func (c *Client) addOperationCreateAliasMiddlewares(stack *middleware.Stack, opt if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateAlias(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateCustomKeyStore.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateCustomKeyStore.go index 9e2552c3..1312b1bd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateCustomKeyStore.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateCustomKeyStore.go @@ -11,83 +11,63 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Creates a custom key store -// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) +// Creates a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) // backed by a key store that you own and manage. When you use a KMS key in a // custom key store for a cryptographic operation, the cryptographic operation is // actually performed in your key store using your keys. KMS supports CloudHSM key -// stores -// (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-cloudhsm.html) -// backed by an CloudHSM cluster -// (https://docs.aws.amazon.com/cloudhsm/latest/userguide/clusters.html) and -// external key stores -// (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html) +// stores (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-cloudhsm.html) +// backed by an CloudHSM cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/clusters.html) +// and external key stores (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html) // backed by an external key store proxy and external key manager outside of Amazon -// Web Services. This operation is part of the custom key stores -// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) +// Web Services. This operation is part of the custom key stores (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) // feature in KMS, which combines the convenience and extensive integration of KMS // with the isolation and control of a key store that you own and manage. 
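Taken together, the CreateAlias documentation above reduces to a single call. A minimal sketch, assuming default credentials and using the placeholder alias name and key ID from the docs (the error handling is illustrative):

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := kms.NewFromConfig(cfg)

	// AliasName must begin with alias/ and must not contain sensitive data;
	// TargetKeyId is the key ID or key ARN of a customer managed key in the same Region.
	_, err = client.CreateAlias(ctx, &kms.CreateAliasInput{
		AliasName:   aws.String("alias/ExampleAlias"),
		TargetKeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
	})
	if err != nil {
		log.Fatal(err)
	}
	// CreateAlias returns no payload; use ListAliases to see the new alias.
}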
Before // you create the custom key store, the required elements must be in place and // operational. We recommend that you use the test tools that KMS provides to // verify the configuration your external key store proxy. For details about the // required elements and verification tests, see Assemble the prerequisites (for -// CloudHSM key stores) -// (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore) -// or Assemble the prerequisites (for external key stores) -// (https://docs.aws.amazon.com/kms/latest/developerguide/create-xks-keystore.html#xks-requirements) +// CloudHSM key stores) (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore) +// or Assemble the prerequisites (for external key stores) (https://docs.aws.amazon.com/kms/latest/developerguide/create-xks-keystore.html#xks-requirements) // in the Key Management Service Developer Guide. To create a custom key store, use // the following parameters. +// - To create an CloudHSM key store, specify the CustomKeyStoreName , +// CloudHsmClusterId , KeyStorePassword , and TrustAnchorCertificate . The +// CustomKeyStoreType parameter is optional for CloudHSM key stores. If you +// include it, set it to the default value, AWS_CLOUDHSM . For help with +// failures, see Troubleshooting an CloudHSM key store (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html) +// in the Key Management Service Developer Guide. +// - To create an external key store, specify the CustomKeyStoreName and a +// CustomKeyStoreType of EXTERNAL_KEY_STORE . Also, specify values for +// XksProxyConnectivity , XksProxyAuthenticationCredential , XksProxyUriEndpoint +// , and XksProxyUriPath . If your XksProxyConnectivity value is +// VPC_ENDPOINT_SERVICE , specify the XksProxyVpcEndpointServiceName parameter. +// For help with failures, see Troubleshooting an external key store (https://docs.aws.amazon.com/kms/latest/developerguide/xks-troubleshooting.html) +// in the Key Management Service Developer Guide. // -// * To create an CloudHSM key store, specify the -// CustomKeyStoreName, CloudHsmClusterId, KeyStorePassword, and -// TrustAnchorCertificate. The CustomKeyStoreType parameter is optional for -// CloudHSM key stores. If you include it, set it to the default value, -// AWS_CLOUDHSM. For help with failures, see Troubleshooting an CloudHSM key store -// (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html) in the -// Key Management Service Developer Guide. -// -// * To create an external key store, -// specify the CustomKeyStoreName and a CustomKeyStoreType of EXTERNAL_KEY_STORE. -// Also, specify values for XksProxyConnectivity, XksProxyAuthenticationCredential, -// XksProxyUriEndpoint, and XksProxyUriPath. If your XksProxyConnectivity value is -// VPC_ENDPOINT_SERVICE, specify the XksProxyVpcEndpointServiceName parameter. For -// help with failures, see Troubleshooting an external key store -// (https://docs.aws.amazon.com/kms/latest/developerguide/xks-troubleshooting.html) -// in the Key Management Service Developer Guide. -// -// For external key stores: Some -// external key managers provide a simpler method for creating an external key -// store. For details, see your external key manager documentation. When creating -// an external key store in the KMS console, you can upload a JSON-based proxy -// configuration file with the desired values. You cannot use a proxy configuration -// with the CreateCustomKeyStore operation. 
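A hedged sketch of the CloudHSM case described in the bullet above; the function name, package name, and all parameter values are placeholders, not part of this patch:

package kmsexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// createCloudHSMKeyStore supplies the four parameters required for an AWS_CLOUDHSM
// custom key store, as documented above.
func createCloudHSMKeyStore(ctx context.Context, client *kms.Client, trustAnchorPEM string) (string, error) {
	out, err := client.CreateCustomKeyStore(ctx, &kms.CreateCustomKeyStoreInput{
		CustomKeyStoreName:     aws.String("ExampleCloudHSMKeyStore"),
		CloudHsmClusterId:      aws.String("cluster-1a23b4cdefg"), // an active, unassociated cluster
		KeyStorePassword:       aws.String("kmsuser-password"),    // kmsuser CU password, 7-32 characters
		TrustAnchorCertificate: aws.String(trustAnchorPEM),        // contents of customerCA.crt
		// CustomKeyStoreType is optional here; the default, AWS_CLOUDHSM, applies.
	})
	if err != nil {
		return "", err
	}
	// The operation returns the new store's ID; connect it with ConnectCustomKeyStore before use.
	return aws.ToString(out.CustomKeyStoreId), nil
}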
However, you can use the values in the -// file to help you determine the correct values for the CreateCustomKeyStore -// parameters. When the operation completes successfully, it returns the ID of the -// new custom key store. Before you can use your new custom key store, you need to -// use the ConnectCustomKeyStore operation to connect a new CloudHSM key store to -// its CloudHSM cluster, or to connect a new external key store to the external key -// store proxy for your external key manager. Even if you are not going to use your -// custom key store immediately, you might want to connect it to verify that all -// settings are correct and then disconnect it until you are ready to use it. For -// help with failures, see Troubleshooting a custom key store -// (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html) in the -// Key Management Service Developer Guide. Cross-account use: No. You cannot +// For external key stores: Some external key managers provide a simpler method +// for creating an external key store. For details, see your external key manager +// documentation. When creating an external key store in the KMS console, you can +// upload a JSON-based proxy configuration file with the desired values. You cannot +// use a proxy configuration with the CreateCustomKeyStore operation. However, you +// can use the values in the file to help you determine the correct values for the +// CreateCustomKeyStore parameters. When the operation completes successfully, it +// returns the ID of the new custom key store. Before you can use your new custom +// key store, you need to use the ConnectCustomKeyStore operation to connect a new +// CloudHSM key store to its CloudHSM cluster, or to connect a new external key +// store to the external key store proxy for your external key manager. Even if you +// are not going to use your custom key store immediately, you might want to +// connect it to verify that all settings are correct and then disconnect it until +// you are ready to use it. For help with failures, see Troubleshooting a custom +// key store (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html) +// in the Key Management Service Developer Guide. Cross-account use: No. You cannot // perform this operation on a custom key store in a different Amazon Web Services -// account. Required permissions: kms:CreateCustomKeyStore -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// account. Required permissions: kms:CreateCustomKeyStore (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (IAM policy). Related operations: -// -// * ConnectCustomKeyStore -// -// * -// DeleteCustomKeyStore -// -// * DescribeCustomKeyStores -// -// * DisconnectCustomKeyStore -// -// * -// UpdateCustomKeyStore +// - ConnectCustomKeyStore +// - DeleteCustomKeyStore +// - DescribeCustomKeyStores +// - DisconnectCustomKeyStore +// - UpdateCustomKeyStore func (c *Client) CreateCustomKeyStore(ctx context.Context, params *CreateCustomKeyStoreInput, optFns ...func(*Options)) (*CreateCustomKeyStoreOutput, error) { if params == nil { params = &CreateCustomKeyStoreInput{} @@ -107,30 +87,30 @@ type CreateCustomKeyStoreInput struct { // Specifies a friendly name for the custom key store. The name must be unique in // your Amazon Web Services account and Region. This parameter is required for all - // custom key stores. + // custom key stores. 
Do not include confidential or sensitive information in this + // field. This field may be displayed in plaintext in CloudTrail logs and other + // output. // // This member is required. CustomKeyStoreName *string // Identifies the CloudHSM cluster for an CloudHSM key store. This parameter is - // required for custom key stores with CustomKeyStoreType of AWS_CLOUDHSM. Enter + // required for custom key stores with CustomKeyStoreType of AWS_CLOUDHSM . Enter // the cluster ID of any active CloudHSM cluster that is not already associated - // with a custom key store. To find the cluster ID, use the DescribeClusters - // (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) + // with a custom key store. To find the cluster ID, use the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) // operation. CloudHsmClusterId *string - // Specifies the type of custom key store. The default value is AWS_CLOUDHSM. For a - // custom key store backed by an CloudHSM cluster, omit the parameter or enter - // AWS_CLOUDHSM. For a custom key store backed by an external key manager outside - // of Amazon Web Services, enter EXTERNAL_KEY_STORE. You cannot change this + // Specifies the type of custom key store. The default value is AWS_CLOUDHSM . For + // a custom key store backed by an CloudHSM cluster, omit the parameter or enter + // AWS_CLOUDHSM . For a custom key store backed by an external key manager outside + // of Amazon Web Services, enter EXTERNAL_KEY_STORE . You cannot change this // property after the key store is created. CustomKeyStoreType types.CustomKeyStoreType // Specifies the kmsuser password for an CloudHSM key store. This parameter is - // required for custom key stores with a CustomKeyStoreType of AWS_CLOUDHSM. Enter - // the password of the kmsuser crypto user (CU) account - // (https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser) + // required for custom key stores with a CustomKeyStoreType of AWS_CLOUDHSM . Enter + // the password of the kmsuser crypto user (CU) account (https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser) // in the specified CloudHSM cluster. KMS logs into the cluster as this user to // manage key material on your behalf. The password must be a string of 7 to 32 // characters. Its value is case sensitive. This parameter tells KMS the kmsuser @@ -138,34 +118,33 @@ type CreateCustomKeyStoreInput struct { KeyStorePassword *string // Specifies the certificate for an CloudHSM key store. This parameter is required - // for custom key stores with a CustomKeyStoreType of AWS_CLOUDHSM. Enter the + // for custom key stores with a CustomKeyStoreType of AWS_CLOUDHSM . Enter the // content of the trust anchor certificate for the CloudHSM cluster. This is the // content of the customerCA.crt file that you created when you initialized the - // cluster - // (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html). + // cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html) + // . TrustAnchorCertificate *string // Specifies an authentication credential for the external key store proxy (XKS // proxy). This parameter is required for all custom key stores with a - // CustomKeyStoreType of EXTERNAL_KEY_STORE. 
The XksProxyAuthenticationCredential - // has two required elements: RawSecretAccessKey, a secret key, and AccessKeyId, a - // unique identifier for the RawSecretAccessKey. For character requirements, see - // XksProxyAuthenticationCredentialType. KMS uses this authentication credential to - // sign requests to the external key store proxy on your behalf. This credential is - // unrelated to Identity and Access Management (IAM) and Amazon Web Services + // CustomKeyStoreType of EXTERNAL_KEY_STORE . The XksProxyAuthenticationCredential + // has two required elements: RawSecretAccessKey , a secret key, and AccessKeyId , + // a unique identifier for the RawSecretAccessKey . For character requirements, see + // XksProxyAuthenticationCredentialType . KMS uses this authentication credential + // to sign requests to the external key store proxy on your behalf. This credential + // is unrelated to Identity and Access Management (IAM) and Amazon Web Services // credentials. This parameter doesn't set or change the authentication credentials // on the XKS proxy. It just tells KMS the credential that you established on your // external key store proxy. If you rotate your proxy authentication credential, // use the UpdateCustomKeyStore operation to provide the new credential to KMS. XksProxyAuthenticationCredential *types.XksProxyAuthenticationCredentialType - // Indicates how KMS communicates with the external key store proxy. This parameter - // is required for custom key stores with a CustomKeyStoreType of - // EXTERNAL_KEY_STORE. If the external key store proxy uses a public endpoint, - // specify PUBLIC_ENDPOINT. If the external key store proxy uses a Amazon VPC - // endpoint service for communication with KMS, specify VPC_ENDPOINT_SERVICE. For - // help making this choice, see Choosing a connectivity option - // (https://docs.aws.amazon.com/kms/latest/developerguide/plan-xks-keystore.html#choose-xks-connectivity) + // Indicates how KMS communicates with the external key store proxy. This + // parameter is required for custom key stores with a CustomKeyStoreType of + // EXTERNAL_KEY_STORE . If the external key store proxy uses a public endpoint, + // specify PUBLIC_ENDPOINT . If the external key store proxy uses a Amazon VPC + // endpoint service for communication with KMS, specify VPC_ENDPOINT_SERVICE . For + // help making this choice, see Choosing a connectivity option (https://docs.aws.amazon.com/kms/latest/developerguide/plan-xks-keystore.html#choose-xks-connectivity) // in the Key Management Service Developer Guide. An Amazon VPC endpoint service // keeps your communication with KMS in a private address space entirely within // Amazon Web Services, but it requires more configuration, including establishing @@ -181,58 +160,49 @@ type CreateCustomKeyStoreInput struct { // Specifies the endpoint that KMS uses to send requests to the external key store // proxy (XKS proxy). This parameter is required for custom key stores with a - // CustomKeyStoreType of EXTERNAL_KEY_STORE. The protocol must be HTTPS. KMS + // CustomKeyStoreType of EXTERNAL_KEY_STORE . The protocol must be HTTPS. KMS // communicates on port 443. Do not specify the port in the XksProxyUriEndpoint // value. For external key stores with XksProxyConnectivity value of - // VPC_ENDPOINT_SERVICE, specify https:// followed by the private DNS name of the - // VPC endpoint service. For external key stores with PUBLIC_ENDPOINT connectivity, - // this endpoint must be reachable before you create the custom key store. 
KMS - // connects to the external key store proxy while creating the custom key store. - // For external key stores with VPC_ENDPOINT_SERVICE connectivity, KMS connects - // when you call the ConnectCustomKeyStore operation. The value of this parameter - // must begin with https://. The remainder can contain upper and lower case letters - // (A-Z and a-z), numbers (0-9), dots (.), and hyphens (-). Additional slashes (/ - // and \) are not permitted. Uniqueness requirements: - // - // * The combined - // XksProxyUriEndpoint and XksProxyUriPath values must be unique in the Amazon Web - // Services account and Region. - // - // * An external key store with PUBLIC_ENDPOINT - // connectivity cannot use the same XksProxyUriEndpoint value as an external key - // store with VPC_ENDPOINT_SERVICE connectivity in the same Amazon Web Services - // Region. - // - // * Each external key store with VPC_ENDPOINT_SERVICE connectivity must - // have its own private DNS name. The XksProxyUriEndpoint value for external key - // stores with VPC_ENDPOINT_SERVICE connectivity (private DNS name) must be unique - // in the Amazon Web Services account and Region. + // VPC_ENDPOINT_SERVICE , specify https:// followed by the private DNS name of the + // VPC endpoint service. For external key stores with PUBLIC_ENDPOINT + // connectivity, this endpoint must be reachable before you create the custom key + // store. KMS connects to the external key store proxy while creating the custom + // key store. For external key stores with VPC_ENDPOINT_SERVICE connectivity, KMS + // connects when you call the ConnectCustomKeyStore operation. The value of this + // parameter must begin with https:// . The remainder can contain upper and lower + // case letters (A-Z and a-z), numbers (0-9), dots ( . ), and hyphens ( - ). + // Additional slashes ( / and \ ) are not permitted. Uniqueness requirements: + // - The combined XksProxyUriEndpoint and XksProxyUriPath values must be unique + // in the Amazon Web Services account and Region. + // - An external key store with PUBLIC_ENDPOINT connectivity cannot use the same + // XksProxyUriEndpoint value as an external key store with VPC_ENDPOINT_SERVICE + // connectivity in the same Amazon Web Services Region. + // - Each external key store with VPC_ENDPOINT_SERVICE connectivity must have its + // own private DNS name. The XksProxyUriEndpoint value for external key stores + // with VPC_ENDPOINT_SERVICE connectivity (private DNS name) must be unique in + // the Amazon Web Services account and Region. XksProxyUriEndpoint *string // Specifies the base path to the proxy APIs for this external key store. To find // this value, see the documentation for your external key store proxy. This // parameter is required for all custom key stores with a CustomKeyStoreType of - // EXTERNAL_KEY_STORE. The value must start with / and must end with /kms/xks/v1 + // EXTERNAL_KEY_STORE . The value must start with / and must end with /kms/xks/v1 // where v1 represents the version of the KMS external key store proxy API. This // path can include an optional prefix between the required elements such as - // /prefix/kms/xks/v1. Uniqueness requirements: - // - // * The combined XksProxyUriEndpoint - // and XksProxyUriPath values must be unique in the Amazon Web Services account and - // Region. + // /prefix/kms/xks/v1 . Uniqueness requirements: + // - The combined XksProxyUriEndpoint and XksProxyUriPath values must be unique + // in the Amazon Web Services account and Region. 
XksProxyUriPath *string // Specifies the name of the Amazon VPC endpoint service for interface endpoints // that is used to communicate with your external key store proxy (XKS proxy). This // parameter is required when the value of CustomKeyStoreType is EXTERNAL_KEY_STORE - // and the value of XksProxyConnectivity is VPC_ENDPOINT_SERVICE. The Amazon VPC - // endpoint service must fulfill all requirements - // (https://docs.aws.amazon.com/kms/latest/developerguide/create-xks-keystore.html#xks-requirements) + // and the value of XksProxyConnectivity is VPC_ENDPOINT_SERVICE . The Amazon VPC + // endpoint service must fulfill all requirements (https://docs.aws.amazon.com/kms/latest/developerguide/create-xks-keystore.html#xks-requirements) // for use with an external key store. Uniqueness requirements: - // - // * External key - // stores with VPC_ENDPOINT_SERVICE connectivity can share an Amazon VPC, but each - // external key store must have its own VPC endpoint service and private DNS name. + // - External key stores with VPC_ENDPOINT_SERVICE connectivity can share an + // Amazon VPC, but each external key store must have its own VPC endpoint service + // and private DNS name. XksProxyVpcEndpointServiceName *string noSmithyDocumentSerde @@ -300,6 +270,9 @@ func (c *Client) addOperationCreateCustomKeyStoreMiddlewares(stack *middleware.S if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateCustomKeyStore(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateGrant.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateGrant.go index 06c4854d..f8858e9b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateGrant.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateGrant.go @@ -11,54 +11,41 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Adds a grant to a KMS key. A grant is a policy instrument that allows Amazon Web -// Services principals to use KMS keys in cryptographic operations. It also can -// allow them to view a KMS key (DescribeKey) and create and manage grants. When +// Adds a grant to a KMS key. A grant is a policy instrument that allows Amazon +// Web Services principals to use KMS keys in cryptographic operations. It also can +// allow them to view a KMS key ( DescribeKey ) and create and manage grants. When // authorizing access to a KMS key, grants are considered along with key policies // and IAM policies. Grants are often used for temporary permissions because you // can create one, use its permissions, and delete it without changing your key // policies or IAM policies. For detailed information about grants, including grant -// terminology, see Grants in KMS -// (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html) in the Key -// Management Service Developer Guide . For examples of working with grants in -// several programming languages, see Programming grants -// (https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html). -// The CreateGrant operation returns a GrantToken and a GrantId. +// terminology, see Grants in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html) +// in the Key Management Service Developer Guide . 
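The XksProxy* parameters documented above fit together as follows in the external key store case. This is an illustrative sketch only; the endpoint, path, and credential values are placeholders, and the helper and package names are not part of this patch:

package kmsexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// createExternalKeyStore wires the XksProxy* parameters together for a proxy with
// PUBLIC_ENDPOINT connectivity.
func createExternalKeyStore(ctx context.Context, client *kms.Client) (string, error) {
	out, err := client.CreateCustomKeyStore(ctx, &kms.CreateCustomKeyStoreInput{
		CustomKeyStoreName:   aws.String("ExampleExternalKeyStore"),
		CustomKeyStoreType:   types.CustomKeyStoreTypeExternalKeyStore,
		XksProxyConnectivity: types.XksProxyConnectivityTypePublicEndpoint,
		XksProxyUriEndpoint:  aws.String("https://xks.example.com"),    // HTTPS, no port
		XksProxyUriPath:      aws.String("/example-prefix/kms/xks/v1"), // must end with /kms/xks/v1
		XksProxyAuthenticationCredential: &types.XksProxyAuthenticationCredentialType{
			AccessKeyId:        aws.String("ABCDE12345670EXAMPLE"),          // placeholder
			RawSecretAccessKey: aws.String("placeholder-raw-secret-access-key"), // placeholder
		},
	})
	if err != nil {
		return "", err
	}
	return aws.ToString(out.CustomKeyStoreId), nil
}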
For examples of working with +// grants in several programming languages, see Programming grants (https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html) +// . The CreateGrant operation returns a GrantToken and a GrantId . +// - When you create, retire, or revoke a grant, there might be a brief delay, +// usually less than five minutes, until the grant is available throughout KMS. +// This state is known as eventual consistency. Once the grant has achieved +// eventual consistency, the grantee principal can use the permissions in the grant +// without identifying the grant. However, to use the permissions in the grant +// immediately, use the GrantToken that CreateGrant returns. For details, see +// Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) +// in the Key Management Service Developer Guide . +// - The CreateGrant operation also returns a GrantId . You can use the GrantId +// and a key identifier to identify the grant in the RetireGrant and RevokeGrant +// operations. To find the grant ID, use the ListGrants or ListRetirableGrants +// operations. // -// * When you -// create, retire, or revoke a grant, there might be a brief delay, usually less -// than five minutes, until the grant is available throughout KMS. This state is -// known as eventual consistency. Once the grant has achieved eventual consistency, -// the grantee principal can use the permissions in the grant without identifying -// the grant. However, to use the permissions in the grant immediately, use the -// GrantToken that CreateGrant returns. For details, see Using a grant token -// (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) -// in the Key Management Service Developer Guide . -// -// * The CreateGrant operation -// also returns a GrantId. You can use the GrantId and a key identifier to identify -// the grant in the RetireGrant and RevokeGrant operations. To find the grant ID, -// use the ListGrants or ListRetirableGrants operations. -// -// The KMS key that you use -// for this operation must be in a compatible key state. For details, see Key -// states of KMS keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the -// Key Management Service Developer Guide. Cross-account use: Yes. To perform this -// operation on a KMS key in a different Amazon Web Services account, specify the -// key ARN in the value of the KeyId parameter. Required permissions: -// kms:CreateGrant -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// The KMS key that you use for this operation must be in a compatible key state. +// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) +// in the Key Management Service Developer Guide. Cross-account use: Yes. To +// perform this operation on a KMS key in a different Amazon Web Services account, +// specify the key ARN in the value of the KeyId parameter. 
Required permissions: +// kms:CreateGrant (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (key policy) Related operations: -// -// * ListGrants -// -// * ListRetirableGrants -// -// * -// RetireGrant -// -// * RevokeGrant +// - ListGrants +// - ListRetirableGrants +// - RetireGrant +// - RevokeGrant func (c *Client) CreateGrant(ctx context.Context, params *CreateGrantInput, optFns ...func(*Options)) (*CreateGrantOutput, error) { if params == nil { params = &CreateGrantInput{} @@ -77,14 +64,11 @@ func (c *Client) CreateGrant(ctx context.Context, params *CreateGrantInput, optF type CreateGrantInput struct { // The identity that gets the permissions specified in the grant. To specify the - // principal, use the Amazon Resource Name (ARN) - // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) of - // an Amazon Web Services principal. Valid Amazon Web Services principals include - // Amazon Web Services accounts (root), IAM users, IAM roles, federated users, and - // assumed role users. For examples of the ARN syntax to use for specifying a - // principal, see Amazon Web Services Identity and Access Management (IAM) - // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam) - // in the Example ARNs section of the Amazon Web Services General Reference. + // grantee principal, use the Amazon Resource Name (ARN) of an Amazon Web Services + // principal. Valid principals include Amazon Web Services accounts, IAM users, IAM + // roles, federated users, and assumed role users. For help with the ARN syntax for + // a principal, see IAM ARNs (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-arns) + // in the Identity and Access Management User Guide . // // This member is required. GranteePrincipal *string @@ -93,14 +77,10 @@ type CreateGrantInput struct { // use this KMS key. Specify the key ID or key ARN of the KMS key. To specify a KMS // key in a different Amazon Web Services account, you must use the key ARN. For // example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: - // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To - // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey . // // This member is required. KeyId *string @@ -110,73 +90,67 @@ type CreateGrantInput struct { // on the KMS key. For example, you cannot create a grant for a symmetric // encryption KMS key that allows the Sign operation, or a grant for an asymmetric // KMS key that allows the GenerateDataKey operation. If you try, KMS returns a - // ValidationError exception. For details, see Grant operations - // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-grant-operations) + // ValidationError exception. For details, see Grant operations (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-grant-operations) // in the Key Management Service Developer Guide. // // This member is required. Operations []types.GrantOperation - // Specifies a grant constraint. KMS supports the EncryptionContextEquals and - // EncryptionContextSubset grant constraints. Each constraint value can include up - // to 8 encryption context pairs. 
The encryption context value in each constraint - // cannot exceed 384 characters. For information about grant constraints, see Using - // grant constraints - // (https://docs.aws.amazon.com/kms/latest/developerguide/create-grant-overview.html#grant-constraints) - // in the Key Management Service Developer Guide. For more information about - // encryption context, see Encryption context - // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) - // in the Key Management Service Developer Guide . The encryption context grant - // constraints allow the permissions in the grant only when the encryption context - // in the request matches (EncryptionContextEquals) or includes - // (EncryptionContextSubset) the encryption context specified in this structure. - // The encryption context grant constraints are supported only on grant operations - // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-grant-operations) - // that include an EncryptionContext parameter, such as cryptographic operations on - // symmetric encryption KMS keys. Grants with grant constraints can include the + // Specifies a grant constraint. Do not include confidential or sensitive + // information in this field. This field may be displayed in plaintext in + // CloudTrail logs and other output. KMS supports the EncryptionContextEquals and + // EncryptionContextSubset grant constraints, which allow the permissions in the + // grant only when the encryption context in the request matches ( + // EncryptionContextEquals ) or includes ( EncryptionContextSubset ) the encryption + // context specified in the constraint. The encryption context grant constraints + // are supported only on grant operations (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-grant-operations) + // that include an EncryptionContext parameter, such as cryptographic operations + // on symmetric encryption KMS keys. Grants with grant constraints can include the // DescribeKey and RetireGrant operations, but the constraint doesn't apply to // these operations. If a grant with a grant constraint includes the CreateGrant // operation, the constraint requires that any grants created with the CreateGrant // permission have an equally strict or stricter encryption context constraint. You // cannot use an encryption context grant constraint for cryptographic operations - // with asymmetric KMS keys or HMAC KMS keys. These keys don't support an - // encryption context. + // with asymmetric KMS keys or HMAC KMS keys. Operations with these keys don't + // support an encryption context. Each constraint value can include up to 8 + // encryption context pairs. The encryption context value in each constraint cannot + // exceed 384 characters. For information about grant constraints, see Using grant + // constraints (https://docs.aws.amazon.com/kms/latest/developerguide/create-grant-overview.html#grant-constraints) + // in the Key Management Service Developer Guide. For more information about + // encryption context, see Encryption context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) + // in the Key Management Service Developer Guide . Constraints *types.GrantConstraints // A list of grant tokens. Use a grant token when your permission to call this // operation comes from a new grant that has not yet achieved eventual consistency. 
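For the grant constraint described above, a minimal illustrative value (the key/value pair is a placeholder) looks like this:

package kmsexamples

import "github.com/aws/aws-sdk-go-v2/service/kms/types"

// exampleConstraints limits the grant's permissions to requests whose encryption
// context includes the pair below (EncryptionContextSubset), per the description above.
var exampleConstraints = &types.GrantConstraints{
	EncryptionContextSubset: map[string]string{
		"department": "it",
	},
}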
- // For more information, see Grant token - // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Using a grant token - // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) + // For more information, see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) // in the Key Management Service Developer Guide. GrantTokens []string - // A friendly name for the grant. Use this value to prevent the unintended creation - // of duplicate grants when retrying this request. When this value is absent, all - // CreateGrant requests result in a new grant with a unique GrantId even if all the - // supplied parameters are identical. This can result in unintended duplicates when - // you retry the CreateGrant request. When this value is present, you can retry a - // CreateGrant request with identical parameters; if the grant already exists, the - // original GrantId is returned without creating a new grant. Note that the - // returned grant token is unique with every CreateGrant request, even when a - // duplicate GrantId is returned. All grant tokens for the same grant ID can be - // used interchangeably. + // A friendly name for the grant. Use this value to prevent the unintended + // creation of duplicate grants when retrying this request. Do not include + // confidential or sensitive information in this field. This field may be displayed + // in plaintext in CloudTrail logs and other output. When this value is absent, all + // CreateGrant requests result in a new grant with a unique GrantId even if all + // the supplied parameters are identical. This can result in unintended duplicates + // when you retry the CreateGrant request. When this value is present, you can + // retry a CreateGrant request with identical parameters; if the grant already + // exists, the original GrantId is returned without creating a new grant. Note + // that the returned grant token is unique with every CreateGrant request, even + // when a duplicate GrantId is returned. All grant tokens for the same grant ID + // can be used interchangeably. Name *string - // The principal that has permission to use the RetireGrant operation to retire the - // grant. To specify the principal, use the Amazon Resource Name (ARN) - // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) of - // an Amazon Web Services principal. Valid Amazon Web Services principals include - // Amazon Web Services accounts (root), IAM users, federated users, and assumed - // role users. For examples of the ARN syntax to use for specifying a principal, - // see Amazon Web Services Identity and Access Management (IAM) - // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam) - // in the Example ARNs section of the Amazon Web Services General Reference. The - // grant determines the retiring principal. Other principals might have permission - // to retire the grant or revoke the grant. For details, see RevokeGrant and - // Retiring and revoking grants - // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#grant-delete) + // The principal that has permission to use the RetireGrant operation to retire + // the grant. 
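Combining the CreateGrantInput fields documented above, a hedged end-to-end sketch follows; the key ARN, role ARNs, grant name, and helper/package names are placeholders, not part of this patch:

package kmsexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// createGrant issues a grant allowing the grantee role to Encrypt and Decrypt with the key.
func createGrant(ctx context.Context, client *kms.Client) (grantID, grantToken string, err error) {
	out, err := client.CreateGrant(ctx, &kms.CreateGrantInput{
		KeyId:            aws.String("arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"),
		GranteePrincipal: aws.String("arn:aws:iam::111122223333:role/ExampleGranteeRole"),
		Operations: []types.GrantOperation{
			types.GrantOperationEncrypt,
			types.GrantOperationDecrypt,
		},
		RetiringPrincipal: aws.String("arn:aws:iam::111122223333:role/ExampleAdminRole"),
		Name:              aws.String("example-grant"), // enables idempotent retries, as described above
	})
	if err != nil {
		return "", "", err
	}
	// Pass out.GrantToken to a follow-up request for immediate use; keep out.GrantId
	// for RetireGrant or RevokeGrant once the grant is eventually consistent.
	return aws.ToString(out.GrantId), aws.ToString(out.GrantToken), nil
}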
To specify the principal, use the Amazon Resource Name (ARN) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // of an Amazon Web Services principal. Valid principals include Amazon Web + // Services accounts, IAM users, IAM roles, federated users, and assumed role + // users. For help with the ARN syntax for a principal, see IAM ARNs (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-arns) + // in the Identity and Access Management User Guide . The grant determines the + // retiring principal. Other principals might have permission to retire the grant + // or revoke the grant. For details, see RevokeGrant and Retiring and revoking + // grants (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#grant-delete) // in the Key Management Service Developer Guide. RetiringPrincipal *string @@ -185,16 +159,14 @@ type CreateGrantInput struct { type CreateGrantOutput struct { - // The unique identifier for the grant. You can use the GrantId in a ListGrants, - // RetireGrant, or RevokeGrant operation. + // The unique identifier for the grant. You can use the GrantId in a ListGrants , + // RetireGrant , or RevokeGrant operation. GrantId *string // The grant token. Use a grant token when your permission to call this operation // comes from a new grant that has not yet achieved eventual consistency. For more - // information, see Grant token - // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Using a grant token - // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) + // information, see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) // in the Key Management Service Developer Guide. GrantToken *string @@ -255,6 +227,9 @@ func (c *Client) addOperationCreateGrantMiddlewares(stack *middleware.Stack, opt if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateGrant(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateKey.go index 0227caf6..e0b123ea 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateKey.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateKey.go @@ -11,18 +11,16 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Creates a unique customer managed KMS key -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#kms-keys) +// Creates a unique customer managed KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#kms-keys) // in your Amazon Web Services account and Region. You can use a KMS key in // cryptographic operations, such as encryption and signing. Some Amazon Web // Services services let you use KMS keys that you create and manage to protect // your service resources. A KMS key is a logical representation of a cryptographic // key. In addition to the key material used in cryptographic operations, a KMS key // includes metadata, such as the key ID, key policy, creation date, description, -// and key state. 
For details, see Managing keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/getting-started.html) in -// the Key Management Service Developer Guide Use the parameters of CreateKey to -// specify the type of KMS key, the source of its key material, its key policy, +// and key state. For details, see Managing keys (https://docs.aws.amazon.com/kms/latest/developerguide/getting-started.html) +// in the Key Management Service Developer Guide Use the parameters of CreateKey +// to specify the type of KMS key, the source of its key material, its key policy, // description, tags, and other properties. KMS has replaced the term customer // master key (CMK) with KMS key and KMS key. The concept has not changed. To // prevent breaking changes, KMS is keeping some variations of this term. To create @@ -30,16 +28,16 @@ import ( // KMS key By default, CreateKey creates a symmetric encryption KMS key with key // material that KMS generates. This is the basic and most widely used type of KMS // key, and provides the best performance. To create a symmetric encryption KMS -// key, you don't need to specify any parameters. The default value for KeySpec, -// SYMMETRIC_DEFAULT, the default value for KeyUsage, ENCRYPT_DECRYPT, and the -// default value for Origin, AWS_KMS, create a symmetric encryption KMS key with +// key, you don't need to specify any parameters. The default value for KeySpec , +// SYMMETRIC_DEFAULT , the default value for KeyUsage , ENCRYPT_DECRYPT , and the +// default value for Origin , AWS_KMS , create a symmetric encryption KMS key with // KMS key material. If you need a key for basic encryption and decryption or you // are creating a KMS key to protect your resources in an Amazon Web Services // service, create a symmetric encryption KMS key. The key material in a symmetric // encryption key never leaves KMS unencrypted. You can use a symmetric encryption // KMS key to encrypt and decrypt data up to 4,096 bytes, but they are typically // used to generate data keys and data keys pairs. For details, see GenerateDataKey -// and GenerateDataKeyPair. Asymmetric KMS keys To create an asymmetric KMS key, +// and GenerateDataKeyPair . Asymmetric KMS keys To create an asymmetric KMS key, // use the KeySpec parameter to specify the type of key material in the KMS key. // Then, use the KeyUsage parameter to determine whether the KMS key will be used // to encrypt and decrypt or sign and verify. You can't change these properties @@ -50,68 +48,57 @@ import ( // outside of KMS. KMS keys with RSA or SM2 key pairs can be used to encrypt or // decrypt data or sign and verify messages (but not both). KMS keys with ECC key // pairs can be used only to sign and verify messages. For information about -// asymmetric KMS keys, see Asymmetric KMS keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) +// asymmetric KMS keys, see Asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) // in the Key Management Service Developer Guide. HMAC KMS key To create an HMAC // KMS key, set the KeySpec parameter to a key spec value for HMAC KMS keys. Then -// set the KeyUsage parameter to GENERATE_VERIFY_MAC. You must set the key usage +// set the KeyUsage parameter to GENERATE_VERIFY_MAC . You must set the key usage // even though GENERATE_VERIFY_MAC is the only valid key usage value for HMAC KMS // keys. You can't change these properties after the KMS key is created. 
HMAC KMS // keys are symmetric keys that never leave KMS unencrypted. You can use HMAC keys -// to generate (GenerateMac) and verify (VerifyMac) HMAC codes for messages up to -// 4096 bytes. HMAC KMS keys are not supported in all Amazon Web Services Regions. -// If you try to create an HMAC KMS key in an Amazon Web Services Region in which -// HMAC keys are not supported, the CreateKey operation returns an -// UnsupportedOperationException. For a list of Regions in which HMAC KMS keys are -// supported, see HMAC keys in KMS -// (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html) in the Key -// Management Service Developer Guide. Multi-Region primary keys Imported key -// material To create a multi-Region primary key in the local Amazon Web Services -// Region, use the MultiRegion parameter with a value of True. To create a -// multi-Region replica key, that is, a KMS key with the same key ID and key -// material as a primary key, but in a different Amazon Web Services Region, use -// the ReplicateKey operation. To change a replica key to a primary key, and its -// primary key to a replica key, use the UpdatePrimaryRegion operation. You can -// create multi-Region KMS keys for all supported KMS key types: symmetric -// encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, and -// asymmetric signing KMS keys. You can also create multi-Region keys with imported -// key material. However, you can't create multi-Region keys in a custom key store. -// This operation supports multi-Region keys, an KMS feature that lets you create -// multiple interoperable KMS keys in different Amazon Web Services Regions. -// Because these KMS keys have the same key ID, key material, and other metadata, -// you can use them interchangeably to encrypt data in one Amazon Web Services -// Region and decrypt it in a different Amazon Web Services Region without -// re-encrypting the data or making a cross-Region call. For more information about -// multi-Region keys, see Multi-Region keys in KMS -// (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html) +// to generate ( GenerateMac ) and verify ( VerifyMac ) HMAC codes for messages up +// to 4096 bytes. Multi-Region primary keys Imported key material To create a +// multi-Region primary key in the local Amazon Web Services Region, use the +// MultiRegion parameter with a value of True . To create a multi-Region replica +// key, that is, a KMS key with the same key ID and key material as a primary key, +// but in a different Amazon Web Services Region, use the ReplicateKey operation. +// To change a replica key to a primary key, and its primary key to a replica key, +// use the UpdatePrimaryRegion operation. You can create multi-Region KMS keys for +// all supported KMS key types: symmetric encryption KMS keys, HMAC KMS keys, +// asymmetric encryption KMS keys, and asymmetric signing KMS keys. You can also +// create multi-Region keys with imported key material. However, you can't create +// multi-Region keys in a custom key store. This operation supports multi-Region +// keys, an KMS feature that lets you create multiple interoperable KMS keys in +// different Amazon Web Services Regions. Because these KMS keys have the same key +// ID, key material, and other metadata, you can use them interchangeably to +// encrypt data in one Amazon Web Services Region and decrypt it in a different +// Amazon Web Services Region without re-encrypting the data or making a +// cross-Region call. 
For more information about multi-Region keys, see +// Multi-Region keys in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html) // in the Key Management Service Developer Guide. To import your own key material -// into a KMS key, begin by creating a symmetric encryption KMS key with no key -// material. To do this, use the Origin parameter of CreateKey with a value of -// EXTERNAL. Next, use GetParametersForImport operation to get a public key and -// import token, and use the public key to encrypt your key material. Then, use -// ImportKeyMaterial with your import token to import the key material. For -// step-by-step instructions, see Importing Key Material -// (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) in -// the Key Management Service Developer Guide . This feature supports only -// symmetric encryption KMS keys, including multi-Region symmetric encryption KMS -// keys. You cannot import key material into any other type of KMS key. To create a -// multi-Region primary key with imported key material, use the Origin parameter of -// CreateKey with a value of EXTERNAL and the MultiRegion parameter with a value of -// True. To create replicas of the multi-Region primary key, use the ReplicateKey -// operation. For instructions, see Importing key material into multi-Region keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-import.html). -// For more information about multi-Region keys, see Multi-Region keys in KMS -// (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html) +// into a KMS key, begin by creating a KMS key with no key material. To do this, +// use the Origin parameter of CreateKey with a value of EXTERNAL . Next, use +// GetParametersForImport operation to get a public key and import token. Use the +// wrapping public key to encrypt your key material. Then, use ImportKeyMaterial +// with your import token to import the key material. For step-by-step +// instructions, see Importing Key Material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) +// in the Key Management Service Developer Guide . You can import key material into +// KMS keys of all supported KMS key types: symmetric encryption KMS keys, HMAC KMS +// keys, asymmetric encryption KMS keys, and asymmetric signing KMS keys. You can +// also create multi-Region keys with imported key material. However, you can't +// import key material into a KMS key in a custom key store. To create a +// multi-Region primary key with imported key material, use the Origin parameter +// of CreateKey with a value of EXTERNAL and the MultiRegion parameter with a +// value of True . To create replicas of the multi-Region primary key, use the +// ReplicateKey operation. For instructions, see Importing key material into +// multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-import.html) +// . For more information about multi-Region keys, see Multi-Region keys in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html) // in the Key Management Service Developer Guide. Custom key store A custom key -// store -// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) +// store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) // lets you protect your Amazon Web Services resources using keys in a backing key // store that you own and manage. 
When you request a cryptographic operation with a // KMS key in a custom key store, the operation is performed in the backing key -// store using its cryptographic keys. KMS supports CloudHSM key stores -// (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-cloudhsm.html) -// backed by an CloudHSM cluster and external key stores -// (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html) +// store using its cryptographic keys. KMS supports CloudHSM key stores (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-cloudhsm.html) +// backed by an CloudHSM cluster and external key stores (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html) // backed by an external key manager outside of Amazon Web Services. When you // create a KMS key in an CloudHSM key store, KMS generates an encryption key in // the CloudHSM cluster and associates it with the KMS key. When you create a KMS @@ -119,39 +106,30 @@ import ( // external key manager. Some external key managers provide a simpler method for // creating a KMS key in an external key store. For details, see your external key // manager documentation. Before you create a KMS key in a custom key store, the -// ConnectionState of the key store must be CONNECTED. To connect the custom key -// store, use the ConnectCustomKeyStore operation. To find the ConnectionState, use -// the DescribeCustomKeyStores operation. To create a KMS key in a custom key -// store, use the CustomKeyStoreId. Use the default KeySpec value, -// SYMMETRIC_DEFAULT, and the default KeyUsage value, ENCRYPT_DECRYPT to create a +// ConnectionState of the key store must be CONNECTED . To connect the custom key +// store, use the ConnectCustomKeyStore operation. To find the ConnectionState , +// use the DescribeCustomKeyStores operation. To create a KMS key in a custom key +// store, use the CustomKeyStoreId . Use the default KeySpec value, +// SYMMETRIC_DEFAULT , and the default KeyUsage value, ENCRYPT_DECRYPT to create a // symmetric encryption key. No other key type is supported in a custom key store. -// To create a KMS key in an CloudHSM key store -// (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-cloudhsm.html), -// use the Origin parameter with a value of AWS_CLOUDHSM. The CloudHSM cluster that -// is associated with the custom key store must have at least two active HSMs in -// different Availability Zones in the Amazon Web Services Region. To create a KMS -// key in an external key store -// (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html), -// use the Origin parameter with a value of EXTERNAL_KEY_STORE and an XksKeyId +// To create a KMS key in an CloudHSM key store (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-cloudhsm.html) +// , use the Origin parameter with a value of AWS_CLOUDHSM . The CloudHSM cluster +// that is associated with the custom key store must have at least two active HSMs +// in different Availability Zones in the Amazon Web Services Region. To create a +// KMS key in an external key store (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html) +// , use the Origin parameter with a value of EXTERNAL_KEY_STORE and an XksKeyId // parameter that identifies an existing external key. Some external key managers // provide a simpler method for creating a KMS key in an external key store. For // details, see your external key manager documentation. Cross-account use: No. 
You // cannot use this operation to create a KMS key in a different Amazon Web Services -// account. Required permissions: kms:CreateKey -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (IAM policy). To use the Tags parameter, kms:TagResource -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// account. Required permissions: kms:CreateKey (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (IAM policy). To use the Tags parameter, kms:TagResource (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (IAM policy). For examples and information about related permissions, see Allow -// a user to create KMS keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/iam-policies.html#iam-policy-example-create-key) +// a user to create KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/iam-policies.html#iam-policy-example-create-key) // in the Key Management Service Developer Guide. Related operations: -// -// * -// DescribeKey -// -// * ListKeys -// -// * ScheduleKeyDeletion +// - DescribeKey +// - ListKeys +// - ScheduleKeyDeletion func (c *Client) CreateKey(ctx context.Context, params *CreateKeyInput, optFns ...func(*Options)) (*CreateKeyOutput, error) { if params == nil { params = &CreateKeyInput{} @@ -169,20 +147,17 @@ func (c *Client) CreateKey(ctx context.Context, params *CreateKeyInput, optFns . type CreateKeyInput struct { - // A flag to indicate whether to bypass the key policy lockout safety check. - // Setting this value to true increases the risk that the KMS key becomes + // Skips ("bypasses") the key policy lockout safety check. The default value is + // false. Setting this value to true increases the risk that the KMS key becomes // unmanageable. Do not set this value to true indiscriminately. For more - // information, refer to the scenario in the Default Key Policy - // (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) - // section in the Key Management Service Developer Guide . Use this parameter only - // when you include a policy in the request and you intend to prevent the principal - // that is making the request from making a subsequent PutKeyPolicy request on the - // KMS key. The default value is false. + // information, see Default key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key) + // in the Key Management Service Developer Guide. Use this parameter only when you + // intend to prevent the principal that is making the request from making a + // subsequent PutKeyPolicy request on the KMS key. BypassPolicyLockoutSafetyCheck bool - // Creates the KMS key in the specified custom key store - // (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html). - // The ConnectionState of the custom key store must be CONNECTED. To find the + // Creates the KMS key in the specified custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) + // . The ConnectionState of the custom key store must be CONNECTED . To find the // CustomKeyStoreID and ConnectionState use the DescribeCustomKeyStores operation. // This parameter is valid only for symmetric encryption KMS keys in a single // Region. You cannot create any other type of KMS key in a custom key store. 
When @@ -204,114 +179,77 @@ type CreateKeyInput struct { // A description of the KMS key. Use a description that helps you decide whether // the KMS key is appropriate for a task. The default value is an empty string (no - // description). To set or change the description after the key is created, use - // UpdateKeyDescription. + // description). Do not include confidential or sensitive information in this + // field. This field may be displayed in plaintext in CloudTrail logs and other + // output. To set or change the description after the key is created, use + // UpdateKeyDescription . Description *string - // Specifies the type of KMS key to create. The default value, SYMMETRIC_DEFAULT, + // Specifies the type of KMS key to create. The default value, SYMMETRIC_DEFAULT , // creates a KMS key with a 256-bit AES-GCM key that is used for encryption and // decryption, except in China Regions, where it creates a 128-bit symmetric key // that uses SM4 encryption. For help choosing a key spec for your KMS key, see - // Choosing a KMS key type - // (https://docs.aws.amazon.com/kms/latest/developerguide/key-types.html#symm-asymm-choose) + // Choosing a KMS key type (https://docs.aws.amazon.com/kms/latest/developerguide/key-types.html#symm-asymm-choose) // in the Key Management Service Developer Guide . The KeySpec determines whether // the KMS key contains a symmetric key or an asymmetric key pair. It also // determines the algorithms that the KMS key supports. You can't change the // KeySpec after the KMS key is created. To further restrict the algorithms that // can be used with the KMS key, use a condition key in its key policy or IAM - // policy. For more information, see kms:EncryptionAlgorithm - // (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-encryption-algorithm), - // kms:MacAlgorithm - // (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-mac-algorithm) - // or kms:Signing Algorithm - // (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-signing-algorithm) + // policy. For more information, see kms:EncryptionAlgorithm (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-encryption-algorithm) + // , kms:MacAlgorithm (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-mac-algorithm) + // or kms:Signing Algorithm (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-signing-algorithm) // in the Key Management Service Developer Guide . Amazon Web Services services - // that are integrated with KMS - // (http://aws.amazon.com/kms/features/#AWS_Service_Integration) use symmetric - // encryption KMS keys to protect your data. These services do not support - // asymmetric KMS keys or HMAC KMS keys. 
KMS supports the following key specs for - // KMS keys: - // - // * Symmetric encryption key (default) - // - // * SYMMETRIC_DEFAULT - // - // * HMAC - // keys (symmetric) - // - // * HMAC_224 - // - // * HMAC_256 - // - // * HMAC_384 - // - // * HMAC_512 - // - // * Asymmetric - // RSA key pairs - // - // * RSA_2048 - // - // * RSA_3072 - // - // * RSA_4096 - // - // * Asymmetric NIST-recommended - // elliptic curve key pairs - // - // * ECC_NIST_P256 (secp256r1) - // - // * ECC_NIST_P384 - // (secp384r1) - // - // * ECC_NIST_P521 (secp521r1) - // - // * Other asymmetric elliptic curve key - // pairs - // - // * ECC_SECG_P256K1 (secp256k1), commonly used for cryptocurrencies. - // - // * SM2 - // key pairs (China Regions only) - // - // * SM2 + // that are integrated with KMS (http://aws.amazon.com/kms/features/#AWS_Service_Integration) + // use symmetric encryption KMS keys to protect your data. These services do not + // support asymmetric KMS keys or HMAC KMS keys. KMS supports the following key + // specs for KMS keys: + // - Symmetric encryption key (default) + // - SYMMETRIC_DEFAULT + // - HMAC keys (symmetric) + // - HMAC_224 + // - HMAC_256 + // - HMAC_384 + // - HMAC_512 + // - Asymmetric RSA key pairs + // - RSA_2048 + // - RSA_3072 + // - RSA_4096 + // - Asymmetric NIST-recommended elliptic curve key pairs + // - ECC_NIST_P256 (secp256r1) + // - ECC_NIST_P384 (secp384r1) + // - ECC_NIST_P521 (secp521r1) + // - Other asymmetric elliptic curve key pairs + // - ECC_SECG_P256K1 (secp256k1), commonly used for cryptocurrencies. + // - SM2 key pairs (China Regions only) + // - SM2 KeySpec types.KeySpec - // Determines the cryptographic operations - // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) - // for which you can use the KMS key. The default value is ENCRYPT_DECRYPT. This + // Determines the cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) + // for which you can use the KMS key. The default value is ENCRYPT_DECRYPT . This // parameter is optional when you are creating a symmetric encryption KMS key; - // otherwise, it is required. You can't change the KeyUsage value after the KMS key - // is created. Select only one valid value. - // - // * For symmetric encryption KMS keys, - // omit the parameter or specify ENCRYPT_DECRYPT. - // - // * For HMAC KMS keys (symmetric), - // specify GENERATE_VERIFY_MAC. - // - // * For asymmetric KMS keys with RSA key material, - // specify ENCRYPT_DECRYPT or SIGN_VERIFY. - // - // * For asymmetric KMS keys with ECC key - // material, specify SIGN_VERIFY. - // - // * For asymmetric KMS keys with SM2 key material - // (China Regions only), specify ENCRYPT_DECRYPT or SIGN_VERIFY. + // otherwise, it is required. You can't change the KeyUsage value after the KMS + // key is created. Select only one valid value. + // - For symmetric encryption KMS keys, omit the parameter or specify + // ENCRYPT_DECRYPT . + // - For HMAC KMS keys (symmetric), specify GENERATE_VERIFY_MAC . + // - For asymmetric KMS keys with RSA key material, specify ENCRYPT_DECRYPT or + // SIGN_VERIFY . + // - For asymmetric KMS keys with ECC key material, specify SIGN_VERIFY . + // - For asymmetric KMS keys with SM2 key material (China Regions only), specify + // ENCRYPT_DECRYPT or SIGN_VERIFY . KeyUsage types.KeyUsageType // Creates a multi-Region primary key that you can replicate into other Amazon Web // Services Regions. 
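As a rough illustration of the KeySpec and KeyUsage pairings listed above, the sketch below requests an RSA_2048 key restricted to SIGN_VERIFY. It assumes a *kms.Client built as in the earlier sketch; the package and function names are illustrative, and the enum values are written as their documented strings.

package kmsexamples // illustrative package name

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// createSigningKey asks KMS for an asymmetric RSA key pair that can be used
// only for signing and verification, one of the valid pairings listed above.
func createSigningKey(ctx context.Context, client *kms.Client) (string, error) {
	out, err := client.CreateKey(ctx, &kms.CreateKeyInput{
		KeySpec:     types.KeySpec("RSA_2048"),         // asymmetric RSA key pair
		KeyUsage:    types.KeyUsageType("SIGN_VERIFY"), // required pairing for RSA signing keys
		Description: aws.String("example signing key"),
	})
	if err != nil {
		return "", err
	}
	return aws.ToString(out.KeyMetadata.KeyId), nil
}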
You cannot change this value after you create the KMS key. For - // a multi-Region key, set this parameter to True. For a single-Region KMS key, - // omit this parameter or set it to False. The default value is False. This + // a multi-Region key, set this parameter to True . For a single-Region KMS key, + // omit this parameter or set it to False . The default value is False . This // operation supports multi-Region keys, an KMS feature that lets you create // multiple interoperable KMS keys in different Amazon Web Services Regions. // Because these KMS keys have the same key ID, key material, and other metadata, // you can use them interchangeably to encrypt data in one Amazon Web Services // Region and decrypt it in a different Amazon Web Services Region without // re-encrypting the data or making a cross-Region call. For more information about - // multi-Region keys, see Multi-Region keys in KMS - // (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html) + // multi-Region keys, see Multi-Region keys in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html) // in the Key Management Service Developer Guide. This value creates a primary key, // not a replica. To create a replica key, use the ReplicateKey operation. You can // create a symmetric or asymmetric multi-Region key, and you can create a @@ -320,66 +258,52 @@ type CreateKeyInput struct { MultiRegion *bool // The source of the key material for the KMS key. You cannot change the origin - // after you create the KMS key. The default is AWS_KMS, which means that KMS - // creates the key material. To create a KMS key with no key material - // (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys-create-cmk.html) - // (for imported key material), set this value to EXTERNAL. For more information - // about importing key material into KMS, see Importing Key Material - // (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) in - // the Key Management Service Developer Guide. The EXTERNAL origin value is valid - // only for symmetric KMS keys. To create a KMS key in an CloudHSM key store - // (https://docs.aws.amazon.com/kms/latest/developerguide/create-cmk-keystore.html) + // after you create the KMS key. The default is AWS_KMS , which means that KMS + // creates the key material. To create a KMS key with no key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys-create-cmk.html) + // (for imported key material), set this value to EXTERNAL . For more information + // about importing key material into KMS, see Importing Key Material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) + // in the Key Management Service Developer Guide. The EXTERNAL origin value is + // valid only for symmetric KMS keys. To create a KMS key in an CloudHSM key store (https://docs.aws.amazon.com/kms/latest/developerguide/create-cmk-keystore.html) // and create its key material in the associated CloudHSM cluster, set this value - // to AWS_CLOUDHSM. You must also use the CustomKeyStoreId parameter to identify - // the CloudHSM key store. The KeySpec value must be SYMMETRIC_DEFAULT. To create a - // KMS key in an external key store - // (https://docs.aws.amazon.com/kms/latest/developerguide/create-xks-keys.html), - // set this value to EXTERNAL_KEY_STORE. You must also use the CustomKeyStoreId + // to AWS_CLOUDHSM . You must also use the CustomKeyStoreId parameter to identify + // the CloudHSM key store. 
The KeySpec value must be SYMMETRIC_DEFAULT . To create + // a KMS key in an external key store (https://docs.aws.amazon.com/kms/latest/developerguide/create-xks-keys.html) + // , set this value to EXTERNAL_KEY_STORE . You must also use the CustomKeyStoreId // parameter to identify the external key store and the XksKeyId parameter to // identify the associated external key. The KeySpec value must be - // SYMMETRIC_DEFAULT. + // SYMMETRIC_DEFAULT . Origin types.OriginType // The key policy to attach to the KMS key. If you provide a key policy, it must // meet the following criteria: - // - // * If you don't set BypassPolicyLockoutSafetyCheck - // to true, the key policy must allow the principal that is making the CreateKey - // request to make a subsequent PutKeyPolicy request on the KMS key. This reduces - // the risk that the KMS key becomes unmanageable. For more information, refer to - // the scenario in the Default Key Policy - // (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) - // section of the Key Management Service Developer Guide . - // - // * Each statement in the - // key policy must contain one or more principals. The principals in the key policy - // must exist and be visible to KMS. When you create a new Amazon Web Services - // principal (for example, an IAM user or role), you might need to enforce a delay - // before including the new principal in a key policy because the new principal - // might not be immediately visible to KMS. For more information, see Changes that - // I make are not always immediately visible - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) - // in the Amazon Web Services Identity and Access Management User Guide. - // - // If you do - // not provide a key policy, KMS attaches a default key policy to the KMS key. For - // more information, see Default Key Policy - // (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default) + // - The key policy must allow the calling principal to make a subsequent + // PutKeyPolicy request on the KMS key. This reduces the risk that the KMS key + // becomes unmanageable. For more information, see Default key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key) + // in the Key Management Service Developer Guide. (To omit this condition, set + // BypassPolicyLockoutSafetyCheck to true.) + // - Each statement in the key policy must contain one or more principals. The + // principals in the key policy must exist and be visible to KMS. When you create a + // new Amazon Web Services principal, you might need to enforce a delay before + // including the new principal in a key policy because the new principal might not + // be immediately visible to KMS. For more information, see Changes that I make + // are not always immediately visible (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) + // in the Amazon Web Services Identity and Access Management User Guide. + // If you do not provide a key policy, KMS attaches a default key policy to the + // KMS key. For more information, see Default key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default) // in the Key Management Service Developer Guide. The key policy size quota is 32 // kilobytes (32768 bytes). 
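The Policy parameter discussed above is passed as a JSON document string. The sketch below shows one plausible way to supply it; the statement is the common account-root administration statement and the account ID is a placeholder, so treat it as a starting point rather than a recommended policy.

package kmsexamples // illustrative package name

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// keyPolicy is a minimal key policy that keeps the key manageable by the
// account's administrators; 111122223333 is a placeholder account ID.
const keyPolicy = `{
  "Version": "2012-10-17",
  "Statement": [{
    "Sid": "Enable IAM User Permissions",
    "Effect": "Allow",
    "Principal": {"AWS": "arn:aws:iam::111122223333:root"},
    "Action": "kms:*",
    "Resource": "*"
  }]
}`

// createKeyWithPolicy attaches a key policy at creation time instead of
// relying on the default key policy described above.
func createKeyWithPolicy(ctx context.Context, client *kms.Client) error {
	_, err := client.CreateKey(ctx, &kms.CreateKeyInput{
		Policy: aws.String(keyPolicy),
	})
	return err
}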
For help writing and formatting a JSON policy document, - // see the IAM JSON Policy Reference - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html) in - // the Identity and Access Management User Guide . + // see the IAM JSON Policy Reference (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html) + // in the Identity and Access Management User Guide . Policy *string // Assigns one or more tags to the KMS key. Use this parameter to tag the KMS key // when it is created. To tag an existing KMS key, use the TagResource operation. - // Tagging or untagging a KMS key can allow or deny permission to the KMS key. For - // details, see ABAC for KMS - // (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) in the Key - // Management Service Developer Guide. To use this parameter, you must have - // kms:TagResource - // (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) + // Do not include confidential or sensitive information in this field. This field + // may be displayed in plaintext in CloudTrail logs and other output. Tagging or + // untagging a KMS key can allow or deny permission to the KMS key. For details, + // see ABAC for KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) + // in the Key Management Service Developer Guide. To use this parameter, you must + // have kms:TagResource (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // permission in an IAM policy. Each tag consists of a tag key and a tag value. // Both the tag key and the tag value are required, but the tag value can be an // empty (null) string. You cannot have more than one tag on a KMS key with the @@ -387,34 +311,29 @@ type CreateKeyInput struct { // replaces the current tag value with the specified one. When you add tags to an // Amazon Web Services resource, Amazon Web Services generates a cost allocation // report with usage and costs aggregated by tags. Tags can also be used to control - // access to a KMS key. For details, see Tagging Keys - // (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html). + // access to a KMS key. For details, see Tagging Keys (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html) + // . Tags []types.Tag - // Identifies the external key - // (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-external-key) - // that serves as key material for the KMS key in an external key store - // (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html). - // Specify the ID that the external key store proxy - // (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-xks-proxy) + // Identifies the external key (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-external-key) + // that serves as key material for the KMS key in an external key store (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html) + // . Specify the ID that the external key store proxy (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-xks-proxy) // uses to refer to the external key. For help, see the documentation for your // external key store proxy. This parameter is required for a KMS key with an - // Origin value of EXTERNAL_KEY_STORE. It is not valid for KMS keys with any other + // Origin value of EXTERNAL_KEY_STORE . 
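Combining the Origin, CustomKeyStoreId, and XksKeyId requirements above, a request for a key in an external key store might look like the following sketch (client construction as in the first example; all identifiers are placeholders).

package kmsexamples // illustrative package name

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// createExternalStoreKey creates a KMS key whose cryptographic material lives
// in an external key manager, as described above. The store ID and external
// key ID stand in for values from your own key store setup.
func createExternalStoreKey(ctx context.Context, client *kms.Client) (string, error) {
	out, err := client.CreateKey(ctx, &kms.CreateKeyInput{
		Origin:           types.OriginType("EXTERNAL_KEY_STORE"), // enum string taken from the docs above
		CustomKeyStoreId: aws.String("cks-1234567890abcdef0"),
		XksKeyId:         aws.String("bb8562717f809024"), // ID the external key store proxy uses for the key
	})
	if err != nil {
		return "", err
	}
	return aws.ToString(out.KeyMetadata.Arn), nil
}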
It is not valid for KMS keys with any other // Origin value. The external key must be an existing 256-bit AES symmetric // encryption key hosted outside of Amazon Web Services in an external key manager // associated with the external key store specified by the CustomKeyStoreId // parameter. This key must be enabled and configured to perform encryption and // decryption. Each KMS key in an external key store must use a different external - // key. For details, see Requirements for a KMS key in an external key store - // (https://docs.aws.amazon.com/create-xks-keys.html#xks-key-requirements) in the - // Key Management Service Developer Guide. Each KMS key in an external key store is - // associated two backing keys. One is key material that KMS generates. The other - // is the external key specified by this parameter. When you use the KMS key in an - // external key store to encrypt data, the encryption operation is performed first - // by KMS using the KMS key material, and then by the external key manager using - // the specified external key, a process known as double encryption. For details, - // see Double encryption - // (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-double-encryption) + // key. For details, see Requirements for a KMS key in an external key store (https://docs.aws.amazon.com/create-xks-keys.html#xks-key-requirements) + // in the Key Management Service Developer Guide. Each KMS key in an external key + // store is associated two backing keys. One is key material that KMS generates. + // The other is the external key specified by this parameter. When you use the KMS + // key in an external key store to encrypt data, the encryption operation is + // performed first by KMS using the KMS key material, and then by the external key + // manager using the specified external key, a process known as double encryption. + // For details, see Double encryption (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-double-encryption) // in the Key Management Service Developer Guide. XksKeyId *string @@ -483,6 +402,9 @@ func (c *Client) addOperationCreateKeyMiddlewares(stack *middleware.Stack, optio if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateKey(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Decrypt.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Decrypt.go index 102d8610..648d0027 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Decrypt.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Decrypt.go @@ -13,75 +13,62 @@ import ( // Decrypts ciphertext that was encrypted by a KMS key using any of the following // operations: +// - Encrypt +// - GenerateDataKey +// - GenerateDataKeyPair +// - GenerateDataKeyWithoutPlaintext +// - GenerateDataKeyPairWithoutPlaintext // -// * Encrypt -// -// * GenerateDataKey -// -// * GenerateDataKeyPair -// -// * -// GenerateDataKeyWithoutPlaintext -// -// * GenerateDataKeyPairWithoutPlaintext -// -// You can -// use this operation to decrypt ciphertext that was encrypted under a symmetric -// encryption KMS key or an asymmetric encryption KMS key. 
When the KMS key is
-// asymmetric, you must specify the KMS key and the encryption algorithm that was
-// used to encrypt the ciphertext. For information about asymmetric KMS keys, see
-// Asymmetric KMS keys
-// (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html)
+// in the Key Management Service Developer Guide. The Decrypt operation also
// decrypts ciphertext that was encrypted outside of KMS by the public key in an
// KMS asymmetric KMS key. However, it cannot decrypt symmetric ciphertext produced
-// by other libraries, such as the Amazon Web Services Encryption SDK
-// (https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/) or Amazon
-// S3 client-side encryption
-// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html).
-// These libraries return a ciphertext format that is incompatible with KMS. If the
-// ciphertext was encrypted under a symmetric encryption KMS key, the KeyId
+// by other libraries, such as the Amazon Web Services Encryption SDK (https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/)
+// or Amazon S3 client-side encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html)
+// . These libraries return a ciphertext format that is incompatible with KMS. If
+// the ciphertext was encrypted under a symmetric encryption KMS key, the KeyId
// parameter is optional. KMS can get this information from metadata that it adds
// to the symmetric ciphertext blob. This feature adds durability to your
// implementation by ensuring that authorized users can decrypt ciphertext decades
// after it was encrypted, even if they've lost track of the key ID. However,
// specifying the KMS key is always recommended as a best practice. When you use
-// the KeyId parameter to specify a KMS key, KMS only uses the KMS key you specify.
-// If the ciphertext was encrypted under a different KMS key, the Decrypt operation
-// fails. This practice ensures that you use the KMS key that you intend. Whenever
-// possible, use key policies to give users permission to call the Decrypt
-// operation on a particular KMS key, instead of using IAM policies. Otherwise, you
-// might create an IAM user policy that gives the user Decrypt permission on all
+// the KeyId parameter to specify a KMS key, KMS only uses the KMS key you
+// specify. If the ciphertext was encrypted under a different KMS key, the Decrypt
+// operation fails. This practice ensures that you use the KMS key that you intend.
+// Whenever possible, use key policies to give users permission to call the Decrypt
+// operation on a particular KMS key, instead of using IAM policies. Otherwise,
+// you might create an IAM policy that gives the user Decrypt permission on all
// KMS keys. This user could decrypt ciphertext that was encrypted by KMS keys in
// other accounts if the key policy for the cross-account KMS key permits it. If
-// you must use an IAM policy for Decrypt permissions, limit the user to particular
-// KMS keys or particular trusted accounts.
For details, see Best practices for IAM -// policies -// (https://docs.aws.amazon.com/kms/latest/developerguide/iam-policies.html#iam-policies-best-practices) -// in the Key Management Service Developer Guide. Applications in Amazon Web -// Services Nitro Enclaves can call this operation by using the Amazon Web Services -// Nitro Enclaves Development Kit -// (https://github.com/aws/aws-nitro-enclaves-sdk-c). For information about the -// supporting parameters, see How Amazon Web Services Nitro Enclaves use KMS -// (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html) -// in the Key Management Service Developer Guide. The KMS key that you use for this -// operation must be in a compatible key state. For details, see Key states of KMS -// keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in -// the Key Management Service Developer Guide. Cross-account use: Yes. To perform -// this operation with a KMS key in a different Amazon Web Services account, -// specify the key ARN or alias ARN in the value of the KeyId parameter. Required -// permissions: kms:Decrypt -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// you must use an IAM policy for Decrypt permissions, limit the user to +// particular KMS keys or particular trusted accounts. For details, see Best +// practices for IAM policies (https://docs.aws.amazon.com/kms/latest/developerguide/iam-policies.html#iam-policies-best-practices) +// in the Key Management Service Developer Guide. Decrypt also supports Amazon Web +// Services Nitro Enclaves (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitro-enclave.html) +// , which provide an isolated compute environment in Amazon EC2. To call Decrypt +// for a Nitro enclave, use the Amazon Web Services Nitro Enclaves SDK (https://docs.aws.amazon.com/enclaves/latest/user/developing-applications.html#sdk) +// or any Amazon Web Services SDK. Use the Recipient parameter to provide the +// attestation document for the enclave. Instead of the plaintext data, the +// response includes the plaintext data encrypted with the public key from the +// attestation document ( CiphertextForRecipient ).For information about the +// interaction between KMS and Amazon Web Services Nitro Enclaves, see How Amazon +// Web Services Nitro Enclaves uses KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html) +// in the Key Management Service Developer Guide.. The KMS key that you use for +// this operation must be in a compatible key state. For details, see Key states +// of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) +// in the Key Management Service Developer Guide. Cross-account use: Yes. If you +// use the KeyId parameter to identify a KMS key in a different Amazon Web +// Services account, specify the key ARN or the alias ARN of the KMS key. 
Required +// permissions: kms:Decrypt (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (key policy) Related operations: -// -// * Encrypt -// -// * GenerateDataKey -// -// * -// GenerateDataKeyPair -// -// * ReEncrypt +// - Encrypt +// - GenerateDataKey +// - GenerateDataKeyPair +// - ReEncrypt func (c *Client) Decrypt(ctx context.Context, params *DecryptInput, optFns ...func(*Options)) (*DecryptOutput, error) { if params == nil { params = &DecryptInput{} @@ -108,13 +95,12 @@ type DecryptInput struct { // Specify the same algorithm that was used to encrypt the data. If you specify a // different algorithm, the Decrypt operation fails. This parameter is required // only when the ciphertext was encrypted under an asymmetric KMS key. The default - // value, SYMMETRIC_DEFAULT, represents the only supported algorithm that is valid + // value, SYMMETRIC_DEFAULT , represents the only supported algorithm that is valid // for symmetric encryption KMS keys. EncryptionAlgorithm types.EncryptionAlgorithmSpec // Specifies the encryption context to use when decrypting the data. An encryption - // context is valid only for cryptographic operations - // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) + // context is valid only for cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) // with a symmetric encryption KMS key. The standard asymmetric encryption // algorithms and HMAC algorithms that KMS uses do not support an encryption // context. An encryption context is a collection of non-secret key-value pairs @@ -123,64 +109,77 @@ type DecryptInput struct { // encryption context to decrypt the data. An encryption context is supported only // on operations with symmetric encryption KMS keys. On operations with symmetric // encryption KMS keys, an encryption context is optional, but it is strongly - // recommended. For more information, see Encryption context - // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) + // recommended. For more information, see Encryption context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) // in the Key Management Service Developer Guide. EncryptionContext map[string]string // A list of grant tokens. Use a grant token when your permission to call this // operation comes from a new grant that has not yet achieved eventual consistency. - // For more information, see Grant token - // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Using a grant token - // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) + // For more information, see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) // in the Key Management Service Developer Guide. GrantTokens []string - // Specifies the KMS key that KMS uses to decrypt the ciphertext. Enter a key ID of - // the KMS key that was used to encrypt the ciphertext. If you identify a different - // KMS key, the Decrypt operation throws an IncorrectKeyException. This parameter - // is required only when the ciphertext was encrypted under an asymmetric KMS key. 
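A small usage sketch for the DecryptInput fields described above, assuming a *kms.Client as in the first example: the key ARN follows the example ARN format used in these docs, and the encryption context is a placeholder that must match whatever was supplied at encryption time.

package kmsexamples // illustrative package name

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// decryptWithContext decrypts a ciphertext blob produced by Encrypt or one of
// the GenerateDataKey* operations. KeyId is optional for symmetric ciphertext
// but, as noted above, specifying it is the recommended practice.
func decryptWithContext(ctx context.Context, client *kms.Client, blob []byte) ([]byte, error) {
	out, err := client.Decrypt(ctx, &kms.DecryptInput{
		CiphertextBlob: blob,
		KeyId:          aws.String("arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"),
		EncryptionContext: map[string]string{
			"purpose": "example", // must match the context supplied when the data was encrypted
		},
	})
	if err != nil {
		return nil, err
	}
	return out.Plaintext, nil
}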
- // If you used a symmetric encryption KMS key, KMS can get the KMS key from - // metadata that it adds to the symmetric ciphertext blob. However, it is always - // recommended as a best practice. This practice ensures that you use the KMS key - // that you intend. To specify a KMS key, use its key ID, key ARN, alias name, or - // alias ARN. When using an alias name, prefix it with "alias/". To specify a KMS - // key in a different Amazon Web Services account, you must use the key ARN or - // alias ARN. For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key - // ARN: - // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // * - // Alias name: alias/ExampleAlias - // - // * Alias ARN: - // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias - // - // To get the key ID and key - // ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias - // ARN, use ListAliases. + // Specifies the KMS key that KMS uses to decrypt the ciphertext. Enter a key ID + // of the KMS key that was used to encrypt the ciphertext. If you identify a + // different KMS key, the Decrypt operation throws an IncorrectKeyException . This + // parameter is required only when the ciphertext was encrypted under an asymmetric + // KMS key. If you used a symmetric encryption KMS key, KMS can get the KMS key + // from metadata that it adds to the symmetric ciphertext blob. However, it is + // always recommended as a best practice. This practice ensures that you use the + // KMS key that you intend. To specify a KMS key, use its key ID, key ARN, alias + // name, or alias ARN. When using an alias name, prefix it with "alias/" . To + // specify a KMS key in a different Amazon Web Services account, you must use the + // key ARN or alias ARN. For example: + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // - Alias name: alias/ExampleAlias + // - Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey . To + // get the alias name and alias ARN, use ListAliases . KeyId *string + // A signed attestation document (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitro-enclave-how.html#term-attestdoc) + // from an Amazon Web Services Nitro enclave and the encryption algorithm to use + // with the enclave's public key. The only valid encryption algorithm is + // RSAES_OAEP_SHA_256 . This parameter only supports attestation documents for + // Amazon Web Services Nitro Enclaves. To include this parameter, use the Amazon + // Web Services Nitro Enclaves SDK (https://docs.aws.amazon.com/enclaves/latest/user/developing-applications.html#sdk) + // or any Amazon Web Services SDK. When you use this parameter, instead of + // returning the plaintext data, KMS encrypts the plaintext data with the public + // key in the attestation document, and returns the resulting ciphertext in the + // CiphertextForRecipient field in the response. This ciphertext can be decrypted + // only with the private key in the enclave. The Plaintext field in the response + // is null or empty. For information about the interaction between KMS and Amazon + // Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html) + // in the Key Management Service Developer Guide. 
+ Recipient *types.RecipientInfo + noSmithyDocumentSerde } type DecryptOutput struct { + // The plaintext data encrypted with the public key in the attestation document. + // This field is included in the response only when the Recipient parameter in the + // request includes a valid attestation document from an Amazon Web Services Nitro + // enclave. For information about the interaction between KMS and Amazon Web + // Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html) + // in the Key Management Service Developer Guide. + CiphertextForRecipient []byte + // The encryption algorithm that was used to decrypt the ciphertext. EncryptionAlgorithm types.EncryptionAlgorithmSpec - // The Amazon Resource Name (key ARN - // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) - // of the KMS key that was used to decrypt the ciphertext. + // The Amazon Resource Name ( key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN) + // ) of the KMS key that was used to decrypt the ciphertext. KeyId *string // Decrypted plaintext data. When you use the HTTP API or the Amazon Web Services - // CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. + // CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. If the + // response includes the CiphertextForRecipient field, the Plaintext field is null + // or empty. Plaintext []byte // Metadata pertaining to the operation's result. @@ -240,6 +239,9 @@ func (c *Client) addOperationDecryptMiddlewares(stack *middleware.Stack, options if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDecrypt(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteAlias.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteAlias.go index 1609069b..e55ae63b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteAlias.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteAlias.go @@ -10,37 +10,27 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes the specified alias. Adding, deleting, or updating an alias can allow or -// deny permission to the KMS key. For details, see ABAC for KMS -// (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) in the Key -// Management Service Developer Guide. Because an alias is not a property of a KMS -// key, you can delete and change the aliases of a KMS key without affecting the -// KMS key. Also, aliases do not appear in the response from the DescribeKey -// operation. To get the aliases of all KMS keys, use the ListAliases operation. -// Each KMS key can have multiple aliases. To change the alias of a KMS key, use -// DeleteAlias to delete the current alias and CreateAlias to create a new alias. -// To associate an existing alias with a different KMS key, call UpdateAlias. -// Cross-account use: No. You cannot perform this operation on an alias in a -// different Amazon Web Services account. Required permissions +// Deletes the specified alias. Adding, deleting, or updating an alias can allow +// or deny permission to the KMS key. 
For details, see ABAC for KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) +// in the Key Management Service Developer Guide. Because an alias is not a +// property of a KMS key, you can delete and change the aliases of a KMS key +// without affecting the KMS key. Also, aliases do not appear in the response from +// the DescribeKey operation. To get the aliases of all KMS keys, use the +// ListAliases operation. Each KMS key can have multiple aliases. To change the +// alias of a KMS key, use DeleteAlias to delete the current alias and CreateAlias +// to create a new alias. To associate an existing alias with a different KMS key, +// call UpdateAlias . Cross-account use: No. You cannot perform this operation on +// an alias in a different Amazon Web Services account. Required permissions +// - kms:DeleteAlias (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// on the alias (IAM policy). +// - kms:DeleteAlias (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// on the KMS key (key policy). // -// * kms:DeleteAlias -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// on the alias (IAM policy). -// -// * kms:DeleteAlias -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// on the KMS key (key policy). -// -// For details, see Controlling access to aliases -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html#alias-access) +// For details, see Controlling access to aliases (https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html#alias-access) // in the Key Management Service Developer Guide. Related operations: -// -// * -// CreateAlias -// -// * ListAliases -// -// * UpdateAlias +// - CreateAlias +// - ListAliases +// - UpdateAlias func (c *Client) DeleteAlias(ctx context.Context, params *DeleteAliasInput, optFns ...func(*Options)) (*DeleteAliasOutput, error) { if params == nil { params = &DeleteAliasInput{} @@ -59,7 +49,7 @@ func (c *Client) DeleteAlias(ctx context.Context, params *DeleteAliasInput, optF type DeleteAliasInput struct { // The alias to be deleted. The alias name must begin with alias/ followed by the - // alias name, such as alias/ExampleAlias. + // alias name, such as alias/ExampleAlias . // // This member is required. AliasName *string @@ -125,6 +115,9 @@ func (c *Client) addOperationDeleteAliasMiddlewares(stack *middleware.Stack, opt if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteAlias(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteCustomKeyStore.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteCustomKeyStore.go index 9bf7bcbc..a55bd943 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteCustomKeyStore.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteCustomKeyStore.go @@ -10,29 +10,24 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes a custom key store -// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html). -// This operation does not affect any backing elements of the custom key store. 
It -// does not delete the CloudHSM cluster that is associated with an CloudHSM key +// Deletes a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) +// . This operation does not affect any backing elements of the custom key store. +// It does not delete the CloudHSM cluster that is associated with an CloudHSM key // store, or affect any users or keys in the cluster. For an external key store, it // does not affect the external key store proxy, external key manager, or any -// external keys. This operation is part of the custom key stores -// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) +// external keys. This operation is part of the custom key stores (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) // feature in KMS, which combines the convenience and extensive integration of KMS // with the isolation and control of a key store that you own and manage. The -// custom key store that you delete cannot contain any KMS keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#kms_keys). -// Before deleting the key store, verify that you will never need to use any of the -// KMS keys in the key store for any cryptographic operations -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations). -// Then, use ScheduleKeyDeletion to delete the KMS keys from the key store. After -// the required waiting period expires and all KMS keys are deleted from the custom -// key store, use DisconnectCustomKeyStore to disconnect the key store from KMS. -// Then, you can delete the custom key store. For keys in an CloudHSM key store, -// the ScheduleKeyDeletion operation makes a best effort to delete the key material -// from the associated cluster. However, you might need to manually delete the -// orphaned key material -// (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-orphaned-key) +// custom key store that you delete cannot contain any KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#kms_keys) +// . Before deleting the key store, verify that you will never need to use any of +// the KMS keys in the key store for any cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) +// . Then, use ScheduleKeyDeletion to delete the KMS keys from the key store. +// After the required waiting period expires and all KMS keys are deleted from the +// custom key store, use DisconnectCustomKeyStore to disconnect the key store from +// KMS. Then, you can delete the custom key store. For keys in an CloudHSM key +// store, the ScheduleKeyDeletion operation makes a best effort to delete the key +// material from the associated cluster. However, you might need to manually +// delete the orphaned key material (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-orphaned-key) // from the cluster and its backups. KMS never creates, manages, or deletes // cryptographic keys in the external key manager associated with an external key // store. You must manage them using your external key manager tools. Instead of @@ -43,21 +38,13 @@ import ( // disconnected custom key store at any time. If the operation succeeds, it returns // a JSON object with no properties. Cross-account use: No. 
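The teardown order described above (schedule deletion of the keys, disconnect the store, then delete it) can be sketched roughly as below; the IDs are caller-supplied placeholders, and real code would wait out the scheduled-deletion window instead of issuing these calls back to back.

package kmsexamples // illustrative package name

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// retireCustomKeyStore follows the sequence documented above. In practice
// DeleteCustomKeyStore only succeeds after every scheduled key deletion has
// completed, so these steps are normally separated in time.
func retireCustomKeyStore(ctx context.Context, client *kms.Client, storeID string, keyIDs []string) error {
	for _, id := range keyIDs {
		// Schedule each KMS key in the store for deletion (7 days is the minimum waiting period).
		if _, err := client.ScheduleKeyDeletion(ctx, &kms.ScheduleKeyDeletionInput{
			KeyId:               aws.String(id),
			PendingWindowInDays: aws.Int32(7),
		}); err != nil {
			return err
		}
	}
	// Disconnect the key store from KMS, then delete it.
	if _, err := client.DisconnectCustomKeyStore(ctx, &kms.DisconnectCustomKeyStoreInput{
		CustomKeyStoreId: aws.String(storeID),
	}); err != nil {
		return err
	}
	_, err := client.DeleteCustomKeyStore(ctx, &kms.DeleteCustomKeyStoreInput{
		CustomKeyStoreId: aws.String(storeID),
	})
	return err
}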
You cannot perform this // operation on a custom key store in a different Amazon Web Services account. -// Required permissions: kms:DeleteCustomKeyStore -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// Required permissions: kms:DeleteCustomKeyStore (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (IAM policy) Related operations: -// -// * ConnectCustomKeyStore -// -// * -// CreateCustomKeyStore -// -// * DescribeCustomKeyStores -// -// * DisconnectCustomKeyStore -// -// * -// UpdateCustomKeyStore +// - ConnectCustomKeyStore +// - CreateCustomKeyStore +// - DescribeCustomKeyStores +// - DisconnectCustomKeyStore +// - UpdateCustomKeyStore func (c *Client) DeleteCustomKeyStore(ctx context.Context, params *DeleteCustomKeyStoreInput, optFns ...func(*Options)) (*DeleteCustomKeyStoreOutput, error) { if params == nil { params = &DeleteCustomKeyStoreInput{} @@ -142,6 +129,9 @@ func (c *Client) addOperationDeleteCustomKeyStoreMiddlewares(stack *middleware.S if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteCustomKeyStore(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteImportedKeyMaterial.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteImportedKeyMaterial.go index 7f8839b9..f8c87a7d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteImportedKeyMaterial.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteImportedKeyMaterial.go @@ -10,26 +10,21 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes key material that you previously imported. This operation makes the -// specified KMS key unusable. For more information about importing key material -// into KMS, see Importing Key Material -// (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) in -// the Key Management Service Developer Guide. When the specified KMS key is in the -// PendingDeletion state, this operation does not change the KMS key's state. -// Otherwise, it changes the KMS key's state to PendingImport. After you delete key -// material, you can use ImportKeyMaterial to reimport the same key material into -// the KMS key. The KMS key that you use for this operation must be in a compatible -// key state. For details, see Key states of KMS keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the -// Key Management Service Developer Guide. Cross-account use: No. You cannot +// Deletes key material that was previously imported. This operation makes the +// specified KMS key temporarily unusable. To restore the usability of the KMS key, +// reimport the same key material. For more information about importing key +// material into KMS, see Importing Key Material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) +// in the Key Management Service Developer Guide. When the specified KMS key is in +// the PendingDeletion state, this operation does not change the KMS key's state. +// Otherwise, it changes the KMS key's state to PendingImport . The KMS key that +// you use for this operation must be in a compatible key state. 
For details, see +// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) +// in the Key Management Service Developer Guide. Cross-account use: No. You cannot // perform this operation on a KMS key in a different Amazon Web Services account. -// Required permissions: kms:DeleteImportedKeyMaterial -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// Required permissions: kms:DeleteImportedKeyMaterial (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (key policy) Related operations: -// -// * GetParametersForImport -// -// * ImportKeyMaterial +// - GetParametersForImport +// - ImportKeyMaterial func (c *Client) DeleteImportedKeyMaterial(ctx context.Context, params *DeleteImportedKeyMaterialInput, optFns ...func(*Options)) (*DeleteImportedKeyMaterialOutput, error) { if params == nil { params = &DeleteImportedKeyMaterialInput{} @@ -48,16 +43,12 @@ func (c *Client) DeleteImportedKeyMaterial(ctx context.Context, params *DeleteIm type DeleteImportedKeyMaterialInput struct { // Identifies the KMS key from which you are deleting imported key material. The - // Origin of the KMS key must be EXTERNAL. Specify the key ID or key ARN of the KMS - // key. For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: - // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To - // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // Origin of the KMS key must be EXTERNAL . Specify the key ID or key ARN of the + // KMS key. For example: + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey . // // This member is required. KeyId *string @@ -123,6 +114,9 @@ func (c *Client) addOperationDeleteImportedKeyMaterialMiddlewares(stack *middlew if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteImportedKeyMaterial(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeCustomKeyStores.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeCustomKeyStores.go index 3dfd2f9c..88d3ee3c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeCustomKeyStores.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeCustomKeyStores.go @@ -12,22 +12,20 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Gets information about custom key stores -// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) -// in the account and Region. This operation is part of the custom key stores -// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) +// Gets information about custom key stores (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) +// in the account and Region. 
This operation is part of the custom key stores (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) // feature in KMS, which combines the convenience and extensive integration of KMS // with the isolation and control of a key store that you own and manage. By // default, this operation returns information about all custom key stores in the // account and Region. To get only information about a particular custom key store, // use either the CustomKeyStoreName or CustomKeyStoreId parameter (but not both). // To determine whether the custom key store is connected to its CloudHSM cluster -// or external key store proxy, use the ConnectionState element in the response. If -// an attempt to connect the custom key store failed, the ConnectionState value is -// FAILED and the ConnectionErrorCode element in the response indicates the cause -// of the failure. For help interpreting the ConnectionErrorCode, see -// CustomKeyStoresListEntry. Custom key stores have a DISCONNECTED connection state -// if the key store has never been connected or you used the +// or external key store proxy, use the ConnectionState element in the response. +// If an attempt to connect the custom key store failed, the ConnectionState value +// is FAILED and the ConnectionErrorCode element in the response indicates the +// cause of the failure. For help interpreting the ConnectionErrorCode , see +// CustomKeyStoresListEntry . Custom key stores have a DISCONNECTED connection +// state if the key store has never been connected or you used the // DisconnectCustomKeyStore operation to disconnect it. Otherwise, the connection // state is CONNECTED. If your custom key store connection state is CONNECTED but // you are having trouble using it, verify that the backing store is active and @@ -36,28 +34,18 @@ import ( // operation, if any. For an external key store, verify that the external key store // proxy and its associated external key manager are reachable and enabled. For // help repairing your CloudHSM key store, see the Troubleshooting CloudHSM key -// stores -// (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html). For -// help repairing your external key store, see the Troubleshooting external key -// stores -// (https://docs.aws.amazon.com/kms/latest/developerguide/xks-troubleshooting.html). -// Both topics are in the Key Management Service Developer Guide. Cross-account +// stores (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html) +// . For help repairing your external key store, see the Troubleshooting external +// key stores (https://docs.aws.amazon.com/kms/latest/developerguide/xks-troubleshooting.html) +// . Both topics are in the Key Management Service Developer Guide. Cross-account // use: No. You cannot perform this operation on a custom key store in a different -// Amazon Web Services account. Required permissions: kms:DescribeCustomKeyStores -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// Amazon Web Services account. 
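Here is a minimal sketch of the ConnectionState check described above, again assuming a *kms.Client as in the first example; the key store name is a placeholder, and the state and error code are simply printed rather than matched against specific values.

package kmsexamples // illustrative package name

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// printKeyStoreState looks up one custom key store by name and reports its
// connection state and, when the last connection attempt failed, the error code.
func printKeyStoreState(ctx context.Context, client *kms.Client) error {
	out, err := client.DescribeCustomKeyStores(ctx, &kms.DescribeCustomKeyStoresInput{
		CustomKeyStoreName: aws.String("ExampleKeyStore"), // placeholder name
	})
	if err != nil {
		return err
	}
	for _, store := range out.CustomKeyStores {
		fmt.Printf("%s: ConnectionState=%s ConnectionErrorCode=%s\n",
			aws.ToString(store.CustomKeyStoreId), store.ConnectionState, store.ConnectionErrorCode)
	}
	return nil
}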
Required permissions: kms:DescribeCustomKeyStores (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (IAM policy) Related operations: -// -// * ConnectCustomKeyStore -// -// * -// CreateCustomKeyStore -// -// * DeleteCustomKeyStore -// -// * DisconnectCustomKeyStore -// -// * -// UpdateCustomKeyStore +// - ConnectCustomKeyStore +// - CreateCustomKeyStore +// - DeleteCustomKeyStore +// - DisconnectCustomKeyStore +// - UpdateCustomKeyStore func (c *Client) DescribeCustomKeyStores(ctx context.Context, params *DescribeCustomKeyStoresInput, optFns ...func(*Options)) (*DescribeCustomKeyStoresOutput, error) { if params == nil { params = &DescribeCustomKeyStoresInput{} @@ -95,8 +83,8 @@ type DescribeCustomKeyStoresInput struct { Limit *int32 // Use this parameter in a subsequent request after you receive a response with - // truncated results. Set it to the value of NextMarker from the truncated response - // you just received. + // truncated results. Set it to the value of NextMarker from the truncated + // response you just received. Marker *string noSmithyDocumentSerde @@ -171,6 +159,9 @@ func (c *Client) addOperationDescribeCustomKeyStoresMiddlewares(stack *middlewar if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeCustomKeyStores(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeKey.go index 09613a7b..feedac9a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeKey.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeKey.go @@ -12,64 +12,45 @@ import ( ) // Provides detailed information about a KMS key. You can run DescribeKey on a -// customer managed key -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk) -// or an Amazon Web Services managed key -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk). -// This detailed information includes the key ARN, creation date (and deletion +// customer managed key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk) +// or an Amazon Web Services managed key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk) +// . This detailed information includes the key ARN, creation date (and deletion // date, if applicable), the key state, and the origin and expiration date (if any) -// of the key material. It includes fields, like KeySpec, that help you distinguish -// different types of KMS keys. It also displays the key usage (encryption, -// signing, or generating and verifying MACs) and the algorithms that the KMS key -// supports. For multi-Region keys, DescribeKey displays the primary key and all -// related replica keys. For KMS keys in CloudHSM key stores, it includes -// information about the key store, such as the key store ID and the CloudHSM -// cluster ID. For KMS keys in external key stores, it includes the custom key -// store ID and the ID of the external key. DescribeKey does not return the -// following information: +// of the key material. It includes fields, like KeySpec , that help you +// distinguish different types of KMS keys. 
It also displays the key usage +// (encryption, signing, or generating and verifying MACs) and the algorithms that +// the KMS key supports. For multi-Region keys , DescribeKey displays the primary +// key and all related replica keys. For KMS keys in CloudHSM key stores , it +// includes information about the key store, such as the key store ID and the +// CloudHSM cluster ID. For KMS keys in external key stores , it includes the +// custom key store ID and the ID of the external key. DescribeKey does not return +// the following information: +// - Aliases associated with the KMS key. To get this information, use +// ListAliases . +// - Whether automatic key rotation is enabled on the KMS key. To get this +// information, use GetKeyRotationStatus . Also, some key states prevent a KMS +// key from being automatically rotated. For details, see How Automatic Key +// Rotation Works (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html#rotate-keys-how-it-works) +// in the Key Management Service Developer Guide. +// - Tags on the KMS key. To get this information, use ListResourceTags . +// - Key policies and grants on the KMS key. To get this information, use +// GetKeyPolicy and ListGrants . // -// * Aliases associated with the KMS key. To get this -// information, use ListAliases. -// -// * Whether automatic key rotation is enabled on -// the KMS key. To get this information, use GetKeyRotationStatus. Also, some key -// states prevent a KMS key from being automatically rotated. For details, see How -// Automatic Key Rotation Works -// (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html#rotate-keys-how-it-works) -// in the Key Management Service Developer Guide. -// -// * Tags on the KMS key. To get -// this information, use ListResourceTags. -// -// * Key policies and grants on the KMS -// key. To get this information, use GetKeyPolicy and ListGrants. -// -// In general, -// DescribeKey is a non-mutating operation. It returns data about KMS keys, but -// doesn't change them. However, Amazon Web Services services use DescribeKey to -// create Amazon Web Services managed keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk) +// In general, DescribeKey is a non-mutating operation. It returns data about KMS +// keys, but doesn't change them. However, Amazon Web Services services use +// DescribeKey to create Amazon Web Services managed keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk) // from a predefined Amazon Web Services alias with no key ID. Cross-account use: // Yes. To perform this operation with a KMS key in a different Amazon Web Services // account, specify the key ARN or alias ARN in the value of the KeyId parameter. 
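// --- A minimal sketch, not part of the vendored change: calling DescribeKey
// through the v2 client with an alias name prefixed with "alias/", as the doc
// comment above allows. "alias/ExampleAlias" is the placeholder value from
// the documentation, and only a few KeyMetadata fields are printed.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := kms.NewFromConfig(cfg)

	out, err := client.DescribeKey(context.TODO(), &kms.DescribeKeyInput{
		// A key ID, key ARN, alias name, or alias ARN all work here.
		KeyId: aws.String("alias/ExampleAlias"),
	})
	if err != nil {
		log.Fatal(err)
	}
	md := out.KeyMetadata
	fmt.Printf("ARN: %s, state: %s, spec: %s\n",
		aws.ToString(md.Arn), md.KeyState, md.KeySpec)
}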
-// Required permissions: kms:DescribeKey -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// Required permissions: kms:DescribeKey (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (key policy) Related operations: -// -// * GetKeyPolicy -// -// * GetKeyRotationStatus -// -// * -// ListAliases -// -// * ListGrants -// -// * ListKeys -// -// * ListResourceTags -// -// * ListRetirableGrants +// - GetKeyPolicy +// - GetKeyRotationStatus +// - ListAliases +// - ListGrants +// - ListKeys +// - ListResourceTags +// - ListRetirableGrants func (c *Client) DescribeKey(ctx context.Context, params *DescribeKeyInput, optFns ...func(*Options)) (*DescribeKeyOutput, error) { if params == nil { params = &DescribeKeyInput{} @@ -87,40 +68,28 @@ func (c *Client) DescribeKey(ctx context.Context, params *DescribeKeyInput, optF type DescribeKeyInput struct { - // Describes the specified KMS key. If you specify a predefined Amazon Web Services - // alias (an Amazon Web Services alias with no key ID), KMS associates the alias - // with an Amazon Web Services managed key - // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html##aws-managed-cmk) - // and returns its KeyId and Arn in the response. To specify a KMS key, use its key - // ID, key ARN, alias name, or alias ARN. When using an alias name, prefix it with - // "alias/". To specify a KMS key in a different Amazon Web Services account, you - // must use the key ARN or alias ARN. For example: - // - // * Key ID: - // 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: - // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // * - // Alias name: alias/ExampleAlias - // - // * Alias ARN: - // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias - // - // To get the key ID and key - // ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias - // ARN, use ListAliases. + // Describes the specified KMS key. If you specify a predefined Amazon Web + // Services alias (an Amazon Web Services alias with no key ID), KMS associates the + // alias with an Amazon Web Services managed key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html##aws-managed-cmk) + // and returns its KeyId and Arn in the response. To specify a KMS key, use its + // key ID, key ARN, alias name, or alias ARN. When using an alias name, prefix it + // with "alias/" . To specify a KMS key in a different Amazon Web Services account, + // you must use the key ARN or alias ARN. For example: + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // - Alias name: alias/ExampleAlias + // - Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey . To + // get the alias name and alias ARN, use ListAliases . // // This member is required. KeyId *string // A list of grant tokens. Use a grant token when your permission to call this // operation comes from a new grant that has not yet achieved eventual consistency. 
- // For more information, see Grant token - // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Using a grant token - // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) + // For more information, see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) // in the Key Management Service Developer Guide. GrantTokens []string @@ -189,6 +158,9 @@ func (c *Client) addOperationDescribeKeyMiddlewares(stack *middleware.Stack, opt if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeKey(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKey.go index 066f679e..227611b4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKey.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKey.go @@ -10,19 +10,16 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Sets the state of a KMS key to disabled. This change temporarily prevents use of -// the KMS key for cryptographic operations -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations). -// For more information about how key state affects the use of a KMS key, see Key -// states of KMS keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the -// Key Management Service Developer Guide . The KMS key that you use for this -// operation must be in a compatible key state. For details, see Key states of KMS -// keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in -// the Key Management Service Developer Guide. Cross-account use: No. You cannot +// Sets the state of a KMS key to disabled. This change temporarily prevents use +// of the KMS key for cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) +// . For more information about how key state affects the use of a KMS key, see +// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) +// in the Key Management Service Developer Guide . The KMS key that you use for +// this operation must be in a compatible key state. For details, see Key states +// of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) +// in the Key Management Service Developer Guide. Cross-account use: No. You cannot // perform this operation on a KMS key in a different Amazon Web Services account. 
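// --- Hypothetical usage sketch, not part of this patch: disabling a KMS key
// by key ID, per the DisableKey documentation above. The key ID is the
// placeholder value used throughout these doc comments.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := kms.NewFromConfig(cfg)

	// DisableKey accepts a key ID or key ARN, not an alias.
	_, err = client.DisableKey(context.TODO(), &kms.DisableKeyInput{
		KeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("key disabled; re-enable it later with EnableKey")
}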
-// Required permissions: kms:DisableKey -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// Required permissions: kms:DisableKey (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (key policy) Related operations: EnableKey func (c *Client) DisableKey(ctx context.Context, params *DisableKeyInput, optFns ...func(*Options)) (*DisableKeyOutput, error) { if params == nil { @@ -41,16 +38,12 @@ func (c *Client) DisableKey(ctx context.Context, params *DisableKeyInput, optFns type DisableKeyInput struct { - // Identifies the KMS key to disable. Specify the key ID or key ARN of the KMS key. - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: - // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To - // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // Identifies the KMS key to disable. Specify the key ID or key ARN of the KMS + // key. For example: + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey . // // This member is required. KeyId *string @@ -116,6 +109,9 @@ func (c *Client) addOperationDisableKeyMiddlewares(stack *middleware.Stack, opti if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDisableKey(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKeyRotation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKeyRotation.go index 003e5023..1ba99f2c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKeyRotation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKeyRotation.go @@ -10,41 +10,29 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Disables automatic rotation of the key material -// (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html) of the -// specified symmetric encryption KMS key. Automatic key rotation is supported only -// on symmetric encryption KMS keys. You cannot enable automatic rotation of -// asymmetric KMS keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html), -// HMAC KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html), -// KMS keys with imported key material -// (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html), or -// KMS keys in a custom key store -// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html). -// To enable or disable automatic rotation of a set of related multi-Region keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate), -// set the property on the primary key. You can enable (EnableKeyRotation) and -// disable automatic rotation of the key material in customer managed KMS keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk). 
-// Key material rotation of Amazon Web Services managed KMS keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk) +// Disables automatic rotation of the key material (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html) +// of the specified symmetric encryption KMS key. Automatic key rotation is +// supported only on symmetric encryption KMS keys. You cannot enable automatic +// rotation of asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) +// , HMAC KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html) +// , KMS keys with imported key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) +// , or KMS keys in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) +// . To enable or disable automatic rotation of a set of related multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate) +// , set the property on the primary key. You can enable ( EnableKeyRotation ) and +// disable automatic rotation of the key material in customer managed KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk) +// . Key material rotation of Amazon Web Services managed KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk) // is not configurable. KMS always rotates the key material for every year. -// Rotation of Amazon Web Services owned KMS keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-owned-cmk) +// Rotation of Amazon Web Services owned KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-owned-cmk) // varies. In May 2022, KMS changed the rotation schedule for Amazon Web Services // managed keys from every three years to every year. For details, see -// EnableKeyRotation. The KMS key that you use for this operation must be in a -// compatible key state. For details, see Key states of KMS keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the -// Key Management Service Developer Guide. Cross-account use: No. You cannot +// EnableKeyRotation . The KMS key that you use for this operation must be in a +// compatible key state. For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) +// in the Key Management Service Developer Guide. Cross-account use: No. You cannot // perform this operation on a KMS key in a different Amazon Web Services account. -// Required permissions: kms:DisableKeyRotation -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// Required permissions: kms:DisableKeyRotation (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (key policy) Related operations: -// -// * EnableKeyRotation -// -// * GetKeyRotationStatus +// - EnableKeyRotation +// - GetKeyRotationStatus func (c *Client) DisableKeyRotation(ctx context.Context, params *DisableKeyRotationInput, optFns ...func(*Options)) (*DisableKeyRotationOutput, error) { if params == nil { params = &DisableKeyRotationInput{} @@ -63,23 +51,15 @@ func (c *Client) DisableKeyRotation(ctx context.Context, params *DisableKeyRotat type DisableKeyRotationInput struct { // Identifies a symmetric encryption KMS key. 
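// --- Sketch only, not from this diff: turning off automatic rotation for a
// symmetric encryption key and then reading the status back with
// GetKeyRotationStatus, one of the related operations listed above. The key
// ID is a placeholder.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := kms.NewFromConfig(cfg)
	keyID := aws.String("1234abcd-12ab-34cd-56ef-1234567890ab") // placeholder

	if _, err := client.DisableKeyRotation(context.TODO(),
		&kms.DisableKeyRotationInput{KeyId: keyID}); err != nil {
		log.Fatal(err)
	}

	status, err := client.GetKeyRotationStatus(context.TODO(),
		&kms.GetKeyRotationStatusInput{KeyId: keyID})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("rotation enabled: %v\n", status.KeyRotationEnabled)
}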
You cannot enable or disable - // automatic rotation of asymmetric KMS keys - // (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html#asymmetric-cmks), - // HMAC KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html), - // KMS keys with imported key material - // (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html), or - // KMS keys in a custom key store - // (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html). - // Specify the key ID or key ARN of the KMS key. For example: - // - // * Key ID: - // 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: - // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To - // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // automatic rotation of asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html#asymmetric-cmks) + // , HMAC KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html) + // , KMS keys with imported key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) + // , or KMS keys in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) + // . Specify the key ID or key ARN of the KMS key. For example: + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey . // // This member is required. KeyId *string @@ -145,6 +125,9 @@ func (c *Client) addOperationDisableKeyRotationMiddlewares(stack *middleware.Sta if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDisableKeyRotation(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisconnectCustomKeyStore.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisconnectCustomKeyStore.go index c4f2441b..97396991 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisconnectCustomKeyStore.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisconnectCustomKeyStore.go @@ -10,43 +10,32 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Disconnects the custom key store -// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) +// Disconnects the custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) // from its backing key store. This operation disconnects an CloudHSM key store // from its associated CloudHSM cluster or disconnects an external key store from // the external key store proxy that communicates with your external key manager. -// This operation is part of the custom key stores -// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) +// This operation is part of the custom key stores (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) // feature in KMS, which combines the convenience and extensive integration of KMS // with the isolation and control of a key store that you own and manage. 
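// --- Illustrative sketch, not part of the vendored code: disconnecting a
// custom key store by ID and then confirming its ConnectionState with
// DescribeCustomKeyStores, as suggested above. The store ID is a placeholder.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := kms.NewFromConfig(cfg)
	storeID := aws.String("cks-1234567890abcdef0") // placeholder ID

	if _, err := client.DisconnectCustomKeyStore(context.TODO(),
		&kms.DisconnectCustomKeyStoreInput{CustomKeyStoreId: storeID}); err != nil {
		log.Fatal(err)
	}

	out, err := client.DescribeCustomKeyStores(context.TODO(),
		&kms.DescribeCustomKeyStoresInput{CustomKeyStoreId: storeID})
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range out.CustomKeyStores {
		fmt.Printf("%s is now %s\n", aws.ToString(s.CustomKeyStoreId), s.ConnectionState)
	}
}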
While a // custom key store is disconnected, you can manage the custom key store and its // KMS keys, but you cannot create or use its KMS keys. You can reconnect the // custom key store at any time. While a custom key store is disconnected, all // attempts to create KMS keys in the custom key store or to use existing KMS keys -// in cryptographic operations -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) +// in cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) // will fail. This action can prevent users from storing and accessing sensitive // data. When you disconnect a custom key store, its ConnectionState changes to -// Disconnected. To find the connection state of a custom key store, use the +// Disconnected . To find the connection state of a custom key store, use the // DescribeCustomKeyStores operation. To reconnect a custom key store, use the // ConnectCustomKeyStore operation. If the operation succeeds, it returns a JSON // object with no properties. Cross-account use: No. You cannot perform this // operation on a custom key store in a different Amazon Web Services account. -// Required permissions: kms:DisconnectCustomKeyStore -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// Required permissions: kms:DisconnectCustomKeyStore (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (IAM policy) Related operations: -// -// * ConnectCustomKeyStore -// -// * -// CreateCustomKeyStore -// -// * DeleteCustomKeyStore -// -// * DescribeCustomKeyStores -// -// * -// UpdateCustomKeyStore +// - ConnectCustomKeyStore +// - CreateCustomKeyStore +// - DeleteCustomKeyStore +// - DescribeCustomKeyStores +// - UpdateCustomKeyStore func (c *Client) DisconnectCustomKeyStore(ctx context.Context, params *DisconnectCustomKeyStoreInput, optFns ...func(*Options)) (*DisconnectCustomKeyStoreOutput, error) { if params == nil { params = &DisconnectCustomKeyStoreInput{} @@ -64,8 +53,8 @@ func (c *Client) DisconnectCustomKeyStore(ctx context.Context, params *Disconnec type DisconnectCustomKeyStoreInput struct { - // Enter the ID of the custom key store you want to disconnect. To find the ID of a - // custom key store, use the DescribeCustomKeyStores operation. + // Enter the ID of the custom key store you want to disconnect. To find the ID of + // a custom key store, use the DescribeCustomKeyStores operation. // // This member is required. CustomKeyStoreId *string @@ -131,6 +120,9 @@ func (c *Client) addOperationDisconnectCustomKeyStoreMiddlewares(stack *middlewa if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDisconnectCustomKeyStore(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKey.go index f4464532..4be3252c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKey.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKey.go @@ -11,15 +11,12 @@ import ( ) // Sets the key state of a KMS key to enabled. 
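// --- Hypothetical sketch, not in this patch: re-enabling a disabled KMS key
// and reading back its key state with DescribeKey. The key ID is a
// placeholder.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := kms.NewFromConfig(cfg)
	keyID := aws.String("1234abcd-12ab-34cd-56ef-1234567890ab") // placeholder

	if _, err := client.EnableKey(context.TODO(),
		&kms.EnableKeyInput{KeyId: keyID}); err != nil {
		log.Fatal(err)
	}

	out, err := client.DescribeKey(context.TODO(), &kms.DescribeKeyInput{KeyId: keyID})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("key state: %s\n", out.KeyMetadata.KeyState)
}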
This allows you to use the KMS key -// for cryptographic operations -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations). -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key states of KMS keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the -// Key Management Service Developer Guide. Cross-account use: No. You cannot +// for cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) +// . The KMS key that you use for this operation must be in a compatible key state. +// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) +// in the Key Management Service Developer Guide. Cross-account use: No. You cannot // perform this operation on a KMS key in a different Amazon Web Services account. -// Required permissions: kms:EnableKey -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// Required permissions: kms:EnableKey (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (key policy) Related operations: DisableKey func (c *Client) EnableKey(ctx context.Context, params *EnableKeyInput, optFns ...func(*Options)) (*EnableKeyOutput, error) { if params == nil { @@ -40,14 +37,10 @@ type EnableKeyInput struct { // Identifies the KMS key to enable. Specify the key ID or key ARN of the KMS key. // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: - // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To - // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey . // // This member is required. KeyId *string @@ -113,6 +106,9 @@ func (c *Client) addOperationEnableKeyMiddlewares(stack *middleware.Stack, optio if err = stack.Initialize.Add(newServiceMetadataMiddleware_opEnableKey(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKeyRotation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKeyRotation.go index 0a7b6bbe..737266d6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKeyRotation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKeyRotation.go @@ -10,33 +10,24 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Enables automatic rotation of the key material -// (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html) of the -// specified symmetric encryption KMS key. When you enable automatic rotation of -// acustomer managed KMS key -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk), -// KMS rotates the key material of the KMS key one year (approximately 365 days) +// Enables automatic rotation of the key material (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html) +// of the specified symmetric encryption KMS key. 
When you enable automatic +// rotation of a customer managed KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk) +// , KMS rotates the key material of the KMS key one year (approximately 365 days) // from the enable date and every year thereafter. You can monitor rotation of the // key material for your KMS keys in CloudTrail and Amazon CloudWatch. To disable // rotation of the key material in a customer managed KMS key, use the // DisableKeyRotation operation. Automatic key rotation is supported only on -// symmetric encryption KMS keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#symmetric-cmks). -// You cannot enable automatic rotation of asymmetric KMS keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html), -// HMAC KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html), -// KMS keys with imported key material -// (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html), or -// KMS keys in a custom key store -// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html). -// To enable or disable automatic rotation of a set of related multi-Region keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate), -// set the property on the primary key. You cannot enable or disable automatic -// rotation Amazon Web Services managed KMS keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk). -// KMS always rotates the key material of Amazon Web Services managed keys every -// year. Rotation of Amazon Web Services owned KMS keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-owned-cmk) +// symmetric encryption KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#symmetric-cmks) +// . You cannot enable automatic rotation of asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) +// , HMAC KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html) +// , KMS keys with imported key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) +// , or KMS keys in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) +// . To enable or disable automatic rotation of a set of related multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate) +// , set the property on the primary key. You cannot enable or disable automatic +// rotation Amazon Web Services managed KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk) +// . KMS always rotates the key material of Amazon Web Services managed keys every +// year. Rotation of Amazon Web Services owned KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-owned-cmk) // varies. In May 2022, KMS changed the rotation schedule for Amazon Web Services // managed keys from every three years (approximately 1,095 days) to every year // (approximately 365 days). New Amazon Web Services managed keys are automatically @@ -44,17 +35,13 @@ import ( // thereafter. Existing Amazon Web Services managed keys are automatically rotated // one year after their most recent rotation, and every year thereafter. The KMS // key that you use for this operation must be in a compatible key state. 
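// --- Sketch only, assuming a customer managed symmetric encryption key you
// own: enabling annual rotation of the key material as described above. Not
// part of the vendored diff; the key ID is a placeholder.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := kms.NewFromConfig(cfg)

	// Must be a symmetric encryption key; asymmetric, HMAC, imported-material,
	// and custom key store keys are rejected, per the doc comment above.
	_, err = client.EnableKeyRotation(context.TODO(), &kms.EnableKeyRotationInput{
		KeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("automatic key rotation enabled")
}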
For -// details, see Key states of KMS keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the -// Key Management Service Developer Guide. Cross-account use: No. You cannot +// details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) +// in the Key Management Service Developer Guide. Cross-account use: No. You cannot // perform this operation on a KMS key in a different Amazon Web Services account. -// Required permissions: kms:EnableKeyRotation -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// Required permissions: kms:EnableKeyRotation (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (key policy) Related operations: -// -// * DisableKeyRotation -// -// * GetKeyRotationStatus +// - DisableKeyRotation +// - GetKeyRotationStatus func (c *Client) EnableKeyRotation(ctx context.Context, params *EnableKeyRotationInput, optFns ...func(*Options)) (*EnableKeyRotationOutput, error) { if params == nil { params = &EnableKeyRotationInput{} @@ -73,25 +60,17 @@ func (c *Client) EnableKeyRotation(ctx context.Context, params *EnableKeyRotatio type EnableKeyRotationInput struct { // Identifies a symmetric encryption KMS key. You cannot enable automatic rotation - // of asymmetric KMS keys - // (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html), - // HMAC KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html), - // KMS keys with imported key material - // (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html), or - // KMS keys in a custom key store - // (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html). - // To enable or disable automatic rotation of a set of related multi-Region keys - // (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate), - // set the property on the primary key. Specify the key ID or key ARN of the KMS + // of asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // , HMAC KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html) + // , KMS keys with imported key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) + // , or KMS keys in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) + // . To enable or disable automatic rotation of a set of related multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate) + // , set the property on the primary key. Specify the key ID or key ARN of the KMS // key. For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: - // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To - // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey . // // This member is required. 
KeyId *string @@ -157,6 +136,9 @@ func (c *Client) addOperationEnableKeyRotationMiddlewares(stack *middleware.Stac if err = stack.Initialize.Add(newServiceMetadataMiddleware_opEnableKeyRotation(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Encrypt.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Encrypt.go index e5faf29e..40adf18a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Encrypt.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Encrypt.go @@ -11,19 +11,18 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Encrypts plaintext of up to 4,096 bytes using a KMS key. You can use a symmetric -// or asymmetric KMS key with a KeyUsage of ENCRYPT_DECRYPT. You can use this -// operation to encrypt small amounts of arbitrary data, such as a personal +// Encrypts plaintext of up to 4,096 bytes using a KMS key. You can use a +// symmetric or asymmetric KMS key with a KeyUsage of ENCRYPT_DECRYPT . You can use +// this operation to encrypt small amounts of arbitrary data, such as a personal // identifier or database password, or other sensitive information. You don't need // to use the Encrypt operation to encrypt a data key. The GenerateDataKey and // GenerateDataKeyPair operations return a plaintext data key and an encrypted copy // of that data key. If you use a symmetric encryption KMS key, you can use an // encryption context to add additional security to your encryption operation. If -// you specify an EncryptionContext when encrypting data, you must specify the same -// encryption context (a case-sensitive exact match) when decrypting the data. -// Otherwise, the request to decrypt fails with an InvalidCiphertextException. For -// more information, see Encryption Context -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) +// you specify an EncryptionContext when encrypting data, you must specify the +// same encryption context (a case-sensitive exact match) when decrypting the data. +// Otherwise, the request to decrypt fails with an InvalidCiphertextException . For +// more information, see Encryption Context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) // in the Key Management Service Developer Guide. If you specify an asymmetric KMS // key, you must also specify the encryption algorithm. The algorithm must be // compatible with the KMS key spec. When you use an asymmetric KMS key to encrypt @@ -37,52 +36,29 @@ import ( // asymmetric keys. The standard format for asymmetric key ciphertext does not // include configurable fields. The maximum size of the data that you can encrypt // varies with the type of KMS key and the encryption algorithm that you choose. -// -// * -// Symmetric encryption KMS keys -// -// * SYMMETRIC_DEFAULT: 4096 bytes -// -// * RSA_2048 -// -// * -// RSAES_OAEP_SHA_1: 214 bytes -// -// * RSAES_OAEP_SHA_256: 190 bytes -// -// * RSA_3072 -// -// * -// RSAES_OAEP_SHA_1: 342 bytes -// -// * RSAES_OAEP_SHA_256: 318 bytes -// -// * RSA_4096 -// -// * -// RSAES_OAEP_SHA_1: 470 bytes -// -// * RSAES_OAEP_SHA_256: 446 bytes -// -// * SM2PKE: 1024 -// bytes (China Regions only) -// -// The KMS key that you use for this operation must be -// in a compatible key state. 
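// --- A minimal usage sketch, not part of the diff: encrypting a short secret
// (at most 4,096 bytes for a symmetric key) under a symmetric encryption key
// with an encryption context, which must be supplied again on Decrypt. The
// alias and context values are placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := kms.NewFromConfig(cfg)

	out, err := client.Encrypt(context.TODO(), &kms.EncryptInput{
		KeyId:     aws.String("alias/ExampleAlias"), // placeholder alias
		Plaintext: []byte("database password"),
		// Non-secret additional authenticated data; the same map must be
		// passed to Decrypt (case-sensitive exact match).
		EncryptionContext: map[string]string{"purpose": "example"},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("encrypted under %s (%d bytes of ciphertext)\n",
		aws.ToString(out.KeyId), len(out.CiphertextBlob))
}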
For details, see Key states of KMS keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the -// Key Management Service Developer Guide. Cross-account use: Yes. To perform this -// operation with a KMS key in a different Amazon Web Services account, specify the -// key ARN or alias ARN in the value of the KeyId parameter. Required permissions: -// kms:Encrypt -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// - Symmetric encryption KMS keys +// - SYMMETRIC_DEFAULT : 4096 bytes +// - RSA_2048 +// - RSAES_OAEP_SHA_1 : 214 bytes +// - RSAES_OAEP_SHA_256 : 190 bytes +// - RSA_3072 +// - RSAES_OAEP_SHA_1 : 342 bytes +// - RSAES_OAEP_SHA_256 : 318 bytes +// - RSA_4096 +// - RSAES_OAEP_SHA_1 : 470 bytes +// - RSAES_OAEP_SHA_256 : 446 bytes +// - SM2PKE : 1024 bytes (China Regions only) +// +// The KMS key that you use for this operation must be in a compatible key state. +// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) +// in the Key Management Service Developer Guide. Cross-account use: Yes. To +// perform this operation with a KMS key in a different Amazon Web Services +// account, specify the key ARN or alias ARN in the value of the KeyId parameter. +// Required permissions: kms:Encrypt (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (key policy) Related operations: -// -// * Decrypt -// -// * GenerateDataKey -// -// * -// GenerateDataKeyPair +// - Decrypt +// - GenerateDataKey +// - GenerateDataKeyPair func (c *Client) Encrypt(ctx context.Context, params *EncryptInput, optFns ...func(*Options)) (*EncryptOutput, error) { if params == nil { params = &EncryptInput{} @@ -100,28 +76,19 @@ func (c *Client) Encrypt(ctx context.Context, params *EncryptInput, optFns ...fu type EncryptInput struct { - // Identifies the KMS key to use in the encryption operation. The KMS key must have - // a KeyUsage of ENCRYPT_DECRYPT. To find the KeyUsage of a KMS key, use the + // Identifies the KMS key to use in the encryption operation. The KMS key must + // have a KeyUsage of ENCRYPT_DECRYPT . To find the KeyUsage of a KMS key, use the // DescribeKey operation. To specify a KMS key, use its key ID, key ARN, alias - // name, or alias ARN. When using an alias name, prefix it with "alias/". To + // name, or alias ARN. When using an alias name, prefix it with "alias/" . To // specify a KMS key in a different Amazon Web Services account, you must use the // key ARN or alias ARN. For example: - // - // * Key ID: - // 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: - // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // * - // Alias name: alias/ExampleAlias - // - // * Alias ARN: - // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias - // - // To get the key ID and key - // ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias - // ARN, use ListAliases. + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // - Alias name: alias/ExampleAlias + // - Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey . To + // get the alias name and alias ARN, use ListAliases . // // This member is required. 
KeyId *string @@ -134,33 +101,31 @@ type EncryptInput struct { // Specifies the encryption algorithm that KMS will use to encrypt the plaintext // message. The algorithm must be compatible with the KMS key that you specify. // This parameter is required only for asymmetric KMS keys. The default value, - // SYMMETRIC_DEFAULT, is the algorithm used for symmetric encryption KMS keys. If + // SYMMETRIC_DEFAULT , is the algorithm used for symmetric encryption KMS keys. If // you are using an asymmetric KMS key, we recommend RSAES_OAEP_SHA_256. The SM2PKE // algorithm is only available in China Regions. EncryptionAlgorithm types.EncryptionAlgorithmSpec // Specifies the encryption context that will be used to encrypt the data. An - // encryption context is valid only for cryptographic operations - // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) + // encryption context is valid only for cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) // with a symmetric encryption KMS key. The standard asymmetric encryption // algorithms and HMAC algorithms that KMS uses do not support an encryption - // context. An encryption context is a collection of non-secret key-value pairs - // that represent additional authenticated data. When you use an encryption context - // to encrypt data, you must specify the same (an exact case-sensitive match) - // encryption context to decrypt the data. An encryption context is supported only - // on operations with symmetric encryption KMS keys. On operations with symmetric + // context. Do not include confidential or sensitive information in this field. + // This field may be displayed in plaintext in CloudTrail logs and other output. An + // encryption context is a collection of non-secret key-value pairs that represent + // additional authenticated data. When you use an encryption context to encrypt + // data, you must specify the same (an exact case-sensitive match) encryption + // context to decrypt the data. An encryption context is supported only on + // operations with symmetric encryption KMS keys. On operations with symmetric // encryption KMS keys, an encryption context is optional, but it is strongly - // recommended. For more information, see Encryption context - // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) + // recommended. For more information, see Encryption context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) // in the Key Management Service Developer Guide. EncryptionContext map[string]string // A list of grant tokens. Use a grant token when your permission to call this // operation comes from a new grant that has not yet achieved eventual consistency. - // For more information, see Grant token - // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Using a grant token - // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) + // For more information, see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) // in the Key Management Service Developer Guide. GrantTokens []string @@ -176,9 +141,8 @@ type EncryptOutput struct { // The encryption algorithm that was used to encrypt the plaintext. 
EncryptionAlgorithm types.EncryptionAlgorithmSpec - // The Amazon Resource Name (key ARN - // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) - // of the KMS key that was used to encrypt the plaintext. + // The Amazon Resource Name ( key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN) + // ) of the KMS key that was used to encrypt the plaintext. KeyId *string // Metadata pertaining to the operation's result. @@ -238,6 +202,9 @@ func (c *Client) addOperationEncryptMiddlewares(stack *middleware.Stack, options if err = stack.Initialize.Add(newServiceMetadataMiddleware_opEncrypt(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKey.go index 073edf96..69f71190 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKey.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKey.go @@ -19,77 +19,68 @@ import ( // key with the encrypted data. To generate a data key, specify the symmetric // encryption KMS key that will be used to encrypt the data key. You cannot use an // asymmetric KMS key to encrypt data keys. To get the type of your KMS key, use -// the DescribeKey operation. You must also specify the length of the data key. Use -// either the KeySpec or NumberOfBytes parameters (but not both). For 128-bit and -// 256-bit data keys, use the KeySpec parameter. To generate an SM4 data key (China -// Regions only), specify a KeySpec value of AES_128 or NumberOfBytes value of 128. -// The symmetric encryption key used in China Regions to encrypt your data key is -// an SM4 encryption key. To get only an encrypted copy of the data key, use -// GenerateDataKeyWithoutPlaintext. To generate an asymmetric data key pair, use -// the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext operation. To get -// a cryptographically secure random byte string, use GenerateRandom. You can use -// an optional encryption context to add additional security to the encryption -// operation. If you specify an EncryptionContext, you must specify the same -// encryption context (a case-sensitive exact match) when decrypting the encrypted -// data key. Otherwise, the request to decrypt fails with an -// InvalidCiphertextException. For more information, see Encryption Context -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) -// in the Key Management Service Developer Guide. Applications in Amazon Web -// Services Nitro Enclaves can call this operation by using the Amazon Web Services -// Nitro Enclaves Development Kit -// (https://github.com/aws/aws-nitro-enclaves-sdk-c). For information about the -// supporting parameters, see How Amazon Web Services Nitro Enclaves use KMS -// (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html) -// in the Key Management Service Developer Guide. The KMS key that you use for this -// operation must be in a compatible key state. For details, see Key states of KMS -// keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in -// the Key Management Service Developer Guide. How to use your data key We +// the DescribeKey operation. 
You must also specify the length of the data key. +// Use either the KeySpec or NumberOfBytes parameters (but not both). For 128-bit +// and 256-bit data keys, use the KeySpec parameter. To generate a 128-bit SM4 +// data key (China Regions only), specify a KeySpec value of AES_128 or a +// NumberOfBytes value of 16 . The symmetric encryption key used in China Regions +// to encrypt your data key is an SM4 encryption key. To get only an encrypted copy +// of the data key, use GenerateDataKeyWithoutPlaintext . To generate an asymmetric +// data key pair, use the GenerateDataKeyPair or +// GenerateDataKeyPairWithoutPlaintext operation. To get a cryptographically secure +// random byte string, use GenerateRandom . You can use an optional encryption +// context to add additional security to the encryption operation. If you specify +// an EncryptionContext , you must specify the same encryption context (a +// case-sensitive exact match) when decrypting the encrypted data key. Otherwise, +// the request to decrypt fails with an InvalidCiphertextException . For more +// information, see Encryption Context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) +// in the Key Management Service Developer Guide. GenerateDataKey also supports +// Amazon Web Services Nitro Enclaves (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitro-enclave.html) +// , which provide an isolated compute environment in Amazon EC2. To call +// GenerateDataKey for an Amazon Web Services Nitro enclave, use the Amazon Web +// Services Nitro Enclaves SDK (https://docs.aws.amazon.com/enclaves/latest/user/developing-applications.html#sdk) +// or any Amazon Web Services SDK. Use the Recipient parameter to provide the +// attestation document for the enclave. GenerateDataKey returns a copy of the +// data key encrypted under the specified KMS key, as usual. But instead of a +// plaintext copy of the data key, the response includes a copy of the data key +// encrypted under the public key from the attestation document ( +// CiphertextForRecipient ). For information about the interaction between KMS and +// Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves +// uses KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html) +// in the Key Management Service Developer Guide.. The KMS key that you use for +// this operation must be in a compatible key state. For details, see Key states +// of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) +// in the Key Management Service Developer Guide. How to use your data key We // recommend that you use the following pattern to encrypt data locally in your // application. 
You can write your own code or use a client-side encryption -// library, such as the Amazon Web Services Encryption SDK -// (https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/), the Amazon -// DynamoDB Encryption Client -// (https://docs.aws.amazon.com/dynamodb-encryption-client/latest/devguide/), or -// Amazon S3 client-side encryption -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html) +// library, such as the Amazon Web Services Encryption SDK (https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/) +// , the Amazon DynamoDB Encryption Client (https://docs.aws.amazon.com/dynamodb-encryption-client/latest/devguide/) +// , or Amazon S3 client-side encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html) // to do these tasks for you. To encrypt data outside of KMS: // -// * Use the -// GenerateDataKey operation to get a data key. +// - Use the GenerateDataKey operation to get a data key. // -// * Use the plaintext data key (in -// the Plaintext field of the response) to encrypt your data outside of KMS. Then -// erase the plaintext data key from memory. +// - Use the plaintext data key (in the Plaintext field of the response) to +// encrypt your data outside of KMS. Then erase the plaintext data key from memory. // -// * Store the encrypted data key (in -// the CiphertextBlob field of the response) with the encrypted data. +// - Store the encrypted data key (in the CiphertextBlob field of the response) +// with the encrypted data. // -// To decrypt -// data outside of KMS: +// To decrypt data outside of KMS: +// - Use the Decrypt operation to decrypt the encrypted data key. The operation +// returns a plaintext copy of the data key. +// - Use the plaintext data key to decrypt data outside of KMS, then erase the +// plaintext data key from memory. // -// * Use the Decrypt operation to decrypt the encrypted data -// key. The operation returns a plaintext copy of the data key. -// -// * Use the -// plaintext data key to decrypt data outside of KMS, then erase the plaintext data -// key from memory. -// -// Cross-account use: Yes. To perform this operation with a KMS -// key in a different Amazon Web Services account, specify the key ARN or alias ARN -// in the value of the KeyId parameter. Required permissions: kms:GenerateDataKey -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// Cross-account use: Yes. To perform this operation with a KMS key in a different +// Amazon Web Services account, specify the key ARN or alias ARN in the value of +// the KeyId parameter. Required permissions: kms:GenerateDataKey (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (key policy) Related operations: -// -// * Decrypt -// -// * Encrypt -// -// * GenerateDataKeyPair -// -// * -// GenerateDataKeyPairWithoutPlaintext -// -// * GenerateDataKeyWithoutPlaintext +// - Decrypt +// - Encrypt +// - GenerateDataKeyPair +// - GenerateDataKeyPairWithoutPlaintext +// - GenerateDataKeyWithoutPlaintext func (c *Client) GenerateDataKey(ctx context.Context, params *GenerateDataKeyInput, optFns ...func(*Options)) (*GenerateDataKeyOutput, error) { if params == nil { params = &GenerateDataKeyInput{} @@ -109,48 +100,38 @@ type GenerateDataKeyInput struct { // Specifies the symmetric encryption KMS key that encrypts the data key. You // cannot specify an asymmetric KMS key or a KMS key in a custom key store. 
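// --- Sketch of the "how to use your data key" pattern described above, not
// part of the vendored code: fetch a 256-bit data key, seal data locally with
// AES-GCM, keep only the encrypted copy of the data key, and wipe the
// plaintext key. The wrapping key alias and the sample payload are placeholders.
package main

import (
	"context"
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := kms.NewFromConfig(cfg)

	dk, err := client.GenerateDataKey(context.TODO(), &kms.GenerateDataKeyInput{
		KeyId:   aws.String("alias/ExampleAlias"), // placeholder
		KeySpec: types.DataKeySpecAes256,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Encrypt locally with the plaintext data key, then discard it.
	block, err := aes.NewCipher(dk.Plaintext)
	if err != nil {
		log.Fatal(err)
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		log.Fatal(err)
	}
	nonce := make([]byte, gcm.NonceSize())
	if _, err := rand.Read(nonce); err != nil {
		log.Fatal(err)
	}
	sealed := gcm.Seal(nil, nonce, []byte("application data"), nil)
	for i := range dk.Plaintext {
		dk.Plaintext[i] = 0 // erase the plaintext key from memory
	}

	// Store dk.CiphertextBlob (the encrypted data key), nonce, and sealed
	// together; call Decrypt on the data key when you need to read the data.
	fmt.Printf("stored %d ciphertext bytes with a %d-byte wrapped key\n",
		len(sealed), len(dk.CiphertextBlob))
}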
To get - // the type and origin of your KMS key, use the DescribeKey operation. To specify a - // KMS key, use its key ID, key ARN, alias name, or alias ARN. When using an alias - // name, prefix it with "alias/". To specify a KMS key in a different Amazon Web - // Services account, you must use the key ARN or alias ARN. For example: - // - // * Key ID: - // 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: - // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // * - // Alias name: alias/ExampleAlias - // - // * Alias ARN: - // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias - // - // To get the key ID and key - // ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias - // ARN, use ListAliases. + // the type and origin of your KMS key, use the DescribeKey operation. To specify + // a KMS key, use its key ID, key ARN, alias name, or alias ARN. When using an + // alias name, prefix it with "alias/" . To specify a KMS key in a different Amazon + // Web Services account, you must use the key ARN or alias ARN. For example: + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // - Alias name: alias/ExampleAlias + // - Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey . To + // get the alias name and alias ARN, use ListAliases . // // This member is required. KeyId *string - // Specifies the encryption context that will be used when encrypting the data key. - // An encryption context is a collection of non-secret key-value pairs that - // represent additional authenticated data. When you use an encryption context to - // encrypt data, you must specify the same (an exact case-sensitive match) - // encryption context to decrypt the data. An encryption context is supported only - // on operations with symmetric encryption KMS keys. On operations with symmetric + // Specifies the encryption context that will be used when encrypting the data + // key. Do not include confidential or sensitive information in this field. This + // field may be displayed in plaintext in CloudTrail logs and other output. An + // encryption context is a collection of non-secret key-value pairs that represent + // additional authenticated data. When you use an encryption context to encrypt + // data, you must specify the same (an exact case-sensitive match) encryption + // context to decrypt the data. An encryption context is supported only on + // operations with symmetric encryption KMS keys. On operations with symmetric // encryption KMS keys, an encryption context is optional, but it is strongly - // recommended. For more information, see Encryption context - // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) + // recommended. For more information, see Encryption context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) // in the Key Management Service Developer Guide. EncryptionContext map[string]string // A list of grant tokens. Use a grant token when your permission to call this // operation comes from a new grant that has not yet achieved eventual consistency. 
- // For more information, see Grant token - // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Using a grant token - // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) + // For more information, see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) // in the Key Management Service Developer Guide. GrantTokens []string @@ -167,6 +148,24 @@ type GenerateDataKeyInput struct { // GenerateDataKey request. NumberOfBytes *int32 + // A signed attestation document (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitro-enclave-how.html#term-attestdoc) + // from an Amazon Web Services Nitro enclave and the encryption algorithm to use + // with the enclave's public key. The only valid encryption algorithm is + // RSAES_OAEP_SHA_256 . This parameter only supports attestation documents for + // Amazon Web Services Nitro Enclaves. To include this parameter, use the Amazon + // Web Services Nitro Enclaves SDK (https://docs.aws.amazon.com/enclaves/latest/user/developing-applications.html#sdk) + // or any Amazon Web Services SDK. When you use this parameter, instead of + // returning the plaintext data key, KMS encrypts the plaintext data key under the + // public key in the attestation document, and returns the resulting ciphertext in + // the CiphertextForRecipient field in the response. This ciphertext can be + // decrypted only with the private key in the enclave. The CiphertextBlob field in + // the response contains a copy of the data key encrypted under the KMS key + // specified by the KeyId parameter. The Plaintext field in the response is null + // or empty. For information about the interaction between KMS and Amazon Web + // Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html) + // in the Key Management Service Developer Guide. + Recipient *types.RecipientInfo + noSmithyDocumentSerde } @@ -176,15 +175,25 @@ type GenerateDataKeyOutput struct { // Services CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. CiphertextBlob []byte - // The Amazon Resource Name (key ARN - // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) - // of the KMS key that encrypted the data key. + // The plaintext data key encrypted with the public key from the Nitro enclave. + // This ciphertext can be decrypted only by using a private key in the Nitro + // enclave. This field is included in the response only when the Recipient + // parameter in the request includes a valid attestation document from an Amazon + // Web Services Nitro enclave. For information about the interaction between KMS + // and Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro + // Enclaves uses KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html) + // in the Key Management Service Developer Guide. + CiphertextForRecipient []byte + + // The Amazon Resource Name ( key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN) + // ) of the KMS key that encrypted the data key. KeyId *string // The plaintext data key. When you use the HTTP API or the Amazon Web Services // CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. 
Use this // data key to encrypt your data outside of KMS. Then, remove it from memory as - // soon as possible. + // soon as possible. If the response includes the CiphertextForRecipient field, + // the Plaintext field is null or empty. Plaintext []byte // Metadata pertaining to the operation's result. @@ -244,6 +253,9 @@ func (c *Client) addOperationGenerateDataKeyMiddlewares(stack *middleware.Stack, if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGenerateDataKey(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPair.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPair.go index f67b704c..7f09a563 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPair.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPair.go @@ -11,9 +11,9 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns a unique asymmetric data key pair for use outside of KMS. This operation -// returns a plaintext public key, a plaintext private key, and a copy of the -// private key that is encrypted under the symmetric encryption KMS key you +// Returns a unique asymmetric data key pair for use outside of KMS. This +// operation returns a plaintext public key, a plaintext private key, and a copy of +// the private key that is encrypted under the symmetric encryption KMS key you // specify. You can use the data key pair to perform asymmetric cryptography and // implement digital signatures outside of KMS. The bytes in the keys are random; // they not related to the caller or to the KMS key that is used to encrypt the @@ -24,50 +24,56 @@ import ( // generate a data key pair, you must specify a symmetric encryption KMS key to // encrypt the private key in a data key pair. You cannot use an asymmetric KMS key // or a KMS key in a custom key store. To get the type and origin of your KMS key, -// use the DescribeKey operation. Use the KeyPairSpec parameter to choose an RSA or -// Elliptic Curve (ECC) data key pair. In China Regions, you can also choose an SM2 -// data key pair. KMS recommends that you use ECC key pairs for signing, and use -// RSA and SM2 key pairs for either encryption or signing, but not both. However, -// KMS cannot enforce any restrictions on the use of data key pairs outside of KMS. -// If you are using the data key pair to encrypt data, or for any operation where -// you don't immediately need a private key, consider using the +// use the DescribeKey operation. Use the KeyPairSpec parameter to choose an RSA +// or Elliptic Curve (ECC) data key pair. In China Regions, you can also choose an +// SM2 data key pair. KMS recommends that you use ECC key pairs for signing, and +// use RSA and SM2 key pairs for either encryption or signing, but not both. +// However, KMS cannot enforce any restrictions on the use of data key pairs +// outside of KMS. If you are using the data key pair to encrypt data, or for any +// operation where you don't immediately need a private key, consider using the // GenerateDataKeyPairWithoutPlaintext operation. // GenerateDataKeyPairWithoutPlaintext returns a plaintext public key and an // encrypted private key, but omits the plaintext private key that you need only to // decrypt ciphertext or sign a message. 
Later, when you need to decrypt the data // or sign a message, use the Decrypt operation to decrypt the encrypted private -// key in the data key pair. GenerateDataKeyPair returns a unique data key pair for -// each request. The bytes in the keys are random; they are not related to the +// key in the data key pair. GenerateDataKeyPair returns a unique data key pair +// for each request. The bytes in the keys are random; they are not related to the // caller or the KMS key that is used to encrypt the private key. The public key is -// a DER-encoded X.509 SubjectPublicKeyInfo, as specified in RFC 5280 -// (https://tools.ietf.org/html/rfc5280). The private key is a DER-encoded PKCS8 -// PrivateKeyInfo, as specified in RFC 5958 (https://tools.ietf.org/html/rfc5958). -// You can use an optional encryption context to add additional security to the -// encryption operation. If you specify an EncryptionContext, you must specify the -// same encryption context (a case-sensitive exact match) when decrypting the -// encrypted data key. Otherwise, the request to decrypt fails with an -// InvalidCiphertextException. For more information, see Encryption Context -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) +// a DER-encoded X.509 SubjectPublicKeyInfo, as specified in RFC 5280 (https://tools.ietf.org/html/rfc5280) +// . The private key is a DER-encoded PKCS8 PrivateKeyInfo, as specified in RFC +// 5958 (https://tools.ietf.org/html/rfc5958) . GenerateDataKeyPair also supports +// Amazon Web Services Nitro Enclaves (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitro-enclave.html) +// , which provide an isolated compute environment in Amazon EC2. To call +// GenerateDataKeyPair for an Amazon Web Services Nitro enclave, use the Amazon +// Web Services Nitro Enclaves SDK (https://docs.aws.amazon.com/enclaves/latest/user/developing-applications.html#sdk) +// or any Amazon Web Services SDK. Use the Recipient parameter to provide the +// attestation document for the enclave. GenerateDataKeyPair returns the public +// data key and a copy of the private data key encrypted under the specified KMS +// key, as usual. But instead of a plaintext copy of the private data key ( +// PrivateKeyPlaintext ), the response includes a copy of the private data key +// encrypted under the public key from the attestation document ( +// CiphertextForRecipient ). For information about the interaction between KMS and +// Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves +// uses KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html) +// in the Key Management Service Developer Guide.. You can use an optional +// encryption context to add additional security to the encryption operation. If +// you specify an EncryptionContext , you must specify the same encryption context +// (a case-sensitive exact match) when decrypting the encrypted data key. +// Otherwise, the request to decrypt fails with an InvalidCiphertextException . For +// more information, see Encryption Context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) // in the Key Management Service Developer Guide. The KMS key that you use for this // operation must be in a compatible key state. For details, see Key states of KMS // keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in // the Key Management Service Developer Guide. Cross-account use: Yes. 
To perform // this operation with a KMS key in a different Amazon Web Services account, // specify the key ARN or alias ARN in the value of the KeyId parameter. Required -// permissions: kms:GenerateDataKeyPair -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// permissions: kms:GenerateDataKeyPair (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (key policy) Related operations: -// -// * Decrypt -// -// * Encrypt -// -// * GenerateDataKey -// -// * -// GenerateDataKeyPairWithoutPlaintext -// -// * GenerateDataKeyWithoutPlaintext +// - Decrypt +// - Encrypt +// - GenerateDataKey +// - GenerateDataKeyPairWithoutPlaintext +// - GenerateDataKeyWithoutPlaintext func (c *Client) GenerateDataKeyPair(ctx context.Context, params *GenerateDataKeyPairInput, optFns ...func(*Options)) (*GenerateDataKeyPairOutput, error) { if params == nil { params = &GenerateDataKeyPairInput{} @@ -89,24 +95,16 @@ type GenerateDataKeyPairInput struct { // data key pair. You cannot specify an asymmetric KMS key or a KMS key in a custom // key store. To get the type and origin of your KMS key, use the DescribeKey // operation. To specify a KMS key, use its key ID, key ARN, alias name, or alias - // ARN. When using an alias name, prefix it with "alias/". To specify a KMS key in + // ARN. When using an alias name, prefix it with "alias/" . To specify a KMS key in // a different Amazon Web Services account, you must use the key ARN or alias ARN. // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: - // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // * - // Alias name: alias/ExampleAlias - // - // * Alias ARN: - // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias - // - // To get the key ID and key - // ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias - // ARN, use ListAliases. + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // - Alias name: alias/ExampleAlias + // - Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey . To + // get the alias name and alias ARN, use ListAliases . // // This member is required. KeyId *string @@ -121,35 +119,61 @@ type GenerateDataKeyPairInput struct { KeyPairSpec types.DataKeyPairSpec // Specifies the encryption context that will be used when encrypting the private - // key in the data key pair. An encryption context is a collection of non-secret - // key-value pairs that represent additional authenticated data. When you use an - // encryption context to encrypt data, you must specify the same (an exact - // case-sensitive match) encryption context to decrypt the data. An encryption - // context is supported only on operations with symmetric encryption KMS keys. On - // operations with symmetric encryption KMS keys, an encryption context is - // optional, but it is strongly recommended. For more information, see Encryption - // context - // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) + // key in the data key pair. Do not include confidential or sensitive information + // in this field. This field may be displayed in plaintext in CloudTrail logs and + // other output. 
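// Illustrative sketch (not part of the vendored SDK): generating an ECC data key
// pair as described above. The key alias and encryption context are placeholders;
// error handling is kept minimal for brevity.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

func main() {
	ctx := context.TODO()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := kms.NewFromConfig(cfg)

	out, err := client.GenerateDataKeyPair(ctx, &kms.GenerateDataKeyPairInput{
		KeyId:             aws.String("alias/ExampleAlias"), // symmetric encryption KMS key
		KeyPairSpec:       types.DataKeyPairSpecEccNistP256, // KMS recommends ECC pairs for signing
		EncryptionContext: map[string]string{"purpose": "example"},
	})
	if err != nil {
		log.Fatal(err)
	}

	// PublicKey is a DER-encoded SubjectPublicKeyInfo; PrivateKeyPlaintext is a
	// DER-encoded PKCS8 PrivateKeyInfo. Keep only PrivateKeyCiphertextBlob at rest
	// and call Decrypt when the private key is needed again.
	fmt.Printf("public key: %d bytes, encrypted private key: %d bytes\n",
		len(out.PublicKey), len(out.PrivateKeyCiphertextBlob))
}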
An encryption context is a collection of non-secret key-value + // pairs that represent additional authenticated data. When you use an encryption + // context to encrypt data, you must specify the same (an exact case-sensitive + // match) encryption context to decrypt the data. An encryption context is + // supported only on operations with symmetric encryption KMS keys. On operations + // with symmetric encryption KMS keys, an encryption context is optional, but it is + // strongly recommended. For more information, see Encryption context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) // in the Key Management Service Developer Guide. EncryptionContext map[string]string // A list of grant tokens. Use a grant token when your permission to call this // operation comes from a new grant that has not yet achieved eventual consistency. - // For more information, see Grant token - // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Using a grant token - // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) + // For more information, see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) // in the Key Management Service Developer Guide. GrantTokens []string + // A signed attestation document (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitro-enclave-how.html#term-attestdoc) + // from an Amazon Web Services Nitro enclave and the encryption algorithm to use + // with the enclave's public key. The only valid encryption algorithm is + // RSAES_OAEP_SHA_256 . This parameter only supports attestation documents for + // Amazon Web Services Nitro Enclaves. To include this parameter, use the Amazon + // Web Services Nitro Enclaves SDK (https://docs.aws.amazon.com/enclaves/latest/user/developing-applications.html#sdk) + // or any Amazon Web Services SDK. When you use this parameter, instead of + // returning a plaintext copy of the private data key, KMS encrypts the plaintext + // private data key under the public key in the attestation document, and returns + // the resulting ciphertext in the CiphertextForRecipient field in the response. + // This ciphertext can be decrypted only with the private key in the enclave. The + // CiphertextBlob field in the response contains a copy of the private data key + // encrypted under the KMS key specified by the KeyId parameter. The + // PrivateKeyPlaintext field in the response is null or empty. For information + // about the interaction between KMS and Amazon Web Services Nitro Enclaves, see + // How Amazon Web Services Nitro Enclaves uses KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html) + // in the Key Management Service Developer Guide. + Recipient *types.RecipientInfo + noSmithyDocumentSerde } type GenerateDataKeyPairOutput struct { - // The Amazon Resource Name (key ARN - // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) - // of the KMS key that encrypted the private key. + // The plaintext private data key encrypted with the public key from the Nitro + // enclave. This ciphertext can be decrypted only by using a private key in the + // Nitro enclave. 
This field is included in the response only when the Recipient + // parameter in the request includes a valid attestation document from an Amazon + // Web Services Nitro enclave. For information about the interaction between KMS + // and Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro + // Enclaves uses KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html) + // in the Key Management Service Developer Guide. + CiphertextForRecipient []byte + + // The Amazon Resource Name ( key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN) + // ) of the KMS key that encrypted the private key. KeyId *string // The type of data key pair that was generated. @@ -162,7 +186,8 @@ type GenerateDataKeyPairOutput struct { // The plaintext copy of the private key. When you use the HTTP API or the Amazon // Web Services CLI, the value is Base64-encoded. Otherwise, it is not - // Base64-encoded. + // Base64-encoded. If the response includes the CiphertextForRecipient field, the + // PrivateKeyPlaintext field is null or empty. PrivateKeyPlaintext []byte // The public key (in plaintext). When you use the HTTP API or the Amazon Web @@ -226,6 +251,9 @@ func (c *Client) addOperationGenerateDataKeyPairMiddlewares(stack *middleware.St if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGenerateDataKeyPair(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPairWithoutPlaintext.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPairWithoutPlaintext.go index dc68638f..469aa2c7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPairWithoutPlaintext.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPairWithoutPlaintext.go @@ -11,12 +11,12 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns a unique asymmetric data key pair for use outside of KMS. This operation -// returns a plaintext public key and a copy of the private key that is encrypted -// under the symmetric encryption KMS key you specify. Unlike GenerateDataKeyPair, -// this operation does not return a plaintext private key. The bytes in the keys -// are random; they are not related to the caller or to the KMS key that is used to -// encrypt the private key. You can use the public key that +// Returns a unique asymmetric data key pair for use outside of KMS. This +// operation returns a plaintext public key and a copy of the private key that is +// encrypted under the symmetric encryption KMS key you specify. Unlike +// GenerateDataKeyPair , this operation does not return a plaintext private key. +// The bytes in the keys are random; they are not related to the caller or to the +// KMS key that is used to encrypt the private key. You can use the public key that // GenerateDataKeyPairWithoutPlaintext returns to encrypt data or verify a // signature outside of KMS. Then, store the encrypted private key with the data. // When you are ready to decrypt data or sign a message, you can use the Decrypt @@ -32,34 +32,25 @@ import ( // GenerateDataKeyPairWithoutPlaintext returns a unique data key pair for each // request. 
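// Illustrative sketch (not part of the vendored SDK): the
// GenerateDataKeyPairWithoutPlaintext flow described above. The encrypted private
// key is stored with the data and only decrypted when it is actually needed.
// Identifiers are placeholders.
package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// newKeyPair returns the DER-encoded public key and the encrypted private key.
func newKeyPair(ctx context.Context, client *kms.Client) (publicKey, encryptedPriv []byte, err error) {
	out, err := client.GenerateDataKeyPairWithoutPlaintext(ctx, &kms.GenerateDataKeyPairWithoutPlaintextInput{
		KeyId:       aws.String("alias/ExampleAlias"),
		KeyPairSpec: types.DataKeyPairSpecRsa2048,
	})
	if err != nil {
		return nil, nil, err
	}
	return out.PublicKey, out.PrivateKeyCiphertextBlob, nil
}

// privateKeyDER recovers the plaintext private key later, via the Decrypt operation.
func privateKeyDER(ctx context.Context, client *kms.Client, encryptedPriv []byte) ([]byte, error) {
	out, err := client.Decrypt(ctx, &kms.DecryptInput{
		CiphertextBlob: encryptedPriv,
		KeyId:          aws.String("alias/ExampleAlias"), // the KMS key that encrypted the private key
	})
	if err != nil {
		return nil, err
	}
	return out.Plaintext, nil
}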
The bytes in the key are not related to the caller or KMS key that is // used to encrypt the private key. The public key is a DER-encoded X.509 -// SubjectPublicKeyInfo, as specified in RFC 5280 -// (https://tools.ietf.org/html/rfc5280). You can use an optional encryption -// context to add additional security to the encryption operation. If you specify -// an EncryptionContext, you must specify the same encryption context (a -// case-sensitive exact match) when decrypting the encrypted data key. Otherwise, -// the request to decrypt fails with an InvalidCiphertextException. For more -// information, see Encryption Context -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) +// SubjectPublicKeyInfo, as specified in RFC 5280 (https://tools.ietf.org/html/rfc5280) +// . You can use an optional encryption context to add additional security to the +// encryption operation. If you specify an EncryptionContext , you must specify the +// same encryption context (a case-sensitive exact match) when decrypting the +// encrypted data key. Otherwise, the request to decrypt fails with an +// InvalidCiphertextException . For more information, see Encryption Context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) // in the Key Management Service Developer Guide. The KMS key that you use for this // operation must be in a compatible key state. For details, see Key states of KMS // keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in // the Key Management Service Developer Guide. Cross-account use: Yes. To perform // this operation with a KMS key in a different Amazon Web Services account, // specify the key ARN or alias ARN in the value of the KeyId parameter. Required -// permissions: kms:GenerateDataKeyPairWithoutPlaintext -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// permissions: kms:GenerateDataKeyPairWithoutPlaintext (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (key policy) Related operations: -// -// * Decrypt -// -// * Encrypt -// -// * GenerateDataKey -// -// * -// GenerateDataKeyPair -// -// * GenerateDataKeyWithoutPlaintext +// - Decrypt +// - Encrypt +// - GenerateDataKey +// - GenerateDataKeyPair +// - GenerateDataKeyWithoutPlaintext func (c *Client) GenerateDataKeyPairWithoutPlaintext(ctx context.Context, params *GenerateDataKeyPairWithoutPlaintextInput, optFns ...func(*Options)) (*GenerateDataKeyPairWithoutPlaintextOutput, error) { if params == nil { params = &GenerateDataKeyPairWithoutPlaintextInput{} @@ -81,24 +72,16 @@ type GenerateDataKeyPairWithoutPlaintextInput struct { // data key pair. You cannot specify an asymmetric KMS key or a KMS key in a custom // key store. To get the type and origin of your KMS key, use the DescribeKey // operation. To specify a KMS key, use its key ID, key ARN, alias name, or alias - // ARN. When using an alias name, prefix it with "alias/". To specify a KMS key in + // ARN. When using an alias name, prefix it with "alias/" . To specify a KMS key in // a different Amazon Web Services account, you must use the key ARN or alias ARN. 
// For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: - // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // * - // Alias name: alias/ExampleAlias - // - // * Alias ARN: - // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias - // - // To get the key ID and key - // ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias - // ARN, use ListAliases. + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // - Alias name: alias/ExampleAlias + // - Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey . To + // get the alias name and alias ARN, use ListAliases . // // This member is required. KeyId *string @@ -113,24 +96,22 @@ type GenerateDataKeyPairWithoutPlaintextInput struct { KeyPairSpec types.DataKeyPairSpec // Specifies the encryption context that will be used when encrypting the private - // key in the data key pair. An encryption context is a collection of non-secret - // key-value pairs that represent additional authenticated data. When you use an - // encryption context to encrypt data, you must specify the same (an exact - // case-sensitive match) encryption context to decrypt the data. An encryption - // context is supported only on operations with symmetric encryption KMS keys. On - // operations with symmetric encryption KMS keys, an encryption context is - // optional, but it is strongly recommended. For more information, see Encryption - // context - // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) + // key in the data key pair. Do not include confidential or sensitive information + // in this field. This field may be displayed in plaintext in CloudTrail logs and + // other output. An encryption context is a collection of non-secret key-value + // pairs that represent additional authenticated data. When you use an encryption + // context to encrypt data, you must specify the same (an exact case-sensitive + // match) encryption context to decrypt the data. An encryption context is + // supported only on operations with symmetric encryption KMS keys. On operations + // with symmetric encryption KMS keys, an encryption context is optional, but it is + // strongly recommended. For more information, see Encryption context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) // in the Key Management Service Developer Guide. EncryptionContext map[string]string // A list of grant tokens. Use a grant token when your permission to call this // operation comes from a new grant that has not yet achieved eventual consistency. - // For more information, see Grant token - // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Using a grant token - // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) + // For more information, see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) // in the Key Management Service Developer Guide. 
GrantTokens []string @@ -139,9 +120,8 @@ type GenerateDataKeyPairWithoutPlaintextInput struct { type GenerateDataKeyPairWithoutPlaintextOutput struct { - // The Amazon Resource Name (key ARN - // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) - // of the KMS key that encrypted the private key. + // The Amazon Resource Name ( key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN) + // ) of the KMS key that encrypted the private key. KeyId *string // The type of data key pair that was generated. @@ -213,6 +193,9 @@ func (c *Client) addOperationGenerateDataKeyPairWithoutPlaintextMiddlewares(stac if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGenerateDataKeyPairWithoutPlaintext(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyWithoutPlaintext.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyWithoutPlaintext.go index 06a49b6e..a32ca85b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyWithoutPlaintext.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyWithoutPlaintext.go @@ -26,44 +26,36 @@ import ( // decrypts the data key, uses the plaintext data key to encrypt data, puts the // encrypted data into the container, and then destroys the plaintext data key. In // this system, the component that creates the containers never sees the plaintext -// data key. To request an asymmetric data key pair, use the GenerateDataKeyPair or -// GenerateDataKeyPairWithoutPlaintext operations. To generate a data key, you must -// specify the symmetric encryption KMS key that is used to encrypt the data key. -// You cannot use an asymmetric KMS key or a key in a custom key store to generate -// a data key. To get the type of your KMS key, use the DescribeKey operation. You -// must also specify the length of the data key. Use either the KeySpec or -// NumberOfBytes parameters (but not both). For 128-bit and 256-bit data keys, use -// the KeySpec parameter. To generate an SM4 data key (China Regions only), specify -// a KeySpec value of AES_128 or NumberOfBytes value of 128. The symmetric -// encryption key used in China Regions to encrypt your data key is an SM4 -// encryption key. If the operation succeeds, you will find the encrypted copy of -// the data key in the CiphertextBlob field. You can use an optional encryption -// context to add additional security to the encryption operation. If you specify -// an EncryptionContext, you must specify the same encryption context (a -// case-sensitive exact match) when decrypting the encrypted data key. Otherwise, -// the request to decrypt fails with an InvalidCiphertextException. For more -// information, see Encryption Context -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) +// data key. To request an asymmetric data key pair, use the GenerateDataKeyPair +// or GenerateDataKeyPairWithoutPlaintext operations. To generate a data key, you +// must specify the symmetric encryption KMS key that is used to encrypt the data +// key. You cannot use an asymmetric KMS key or a key in a custom key store to +// generate a data key. To get the type of your KMS key, use the DescribeKey +// operation. 
You must also specify the length of the data key. Use either the +// KeySpec or NumberOfBytes parameters (but not both). For 128-bit and 256-bit +// data keys, use the KeySpec parameter. To generate an SM4 data key (China +// Regions only), specify a KeySpec value of AES_128 or NumberOfBytes value of 16 . +// The symmetric encryption key used in China Regions to encrypt your data key is +// an SM4 encryption key. If the operation succeeds, you will find the encrypted +// copy of the data key in the CiphertextBlob field. You can use an optional +// encryption context to add additional security to the encryption operation. If +// you specify an EncryptionContext , you must specify the same encryption context +// (a case-sensitive exact match) when decrypting the encrypted data key. +// Otherwise, the request to decrypt fails with an InvalidCiphertextException . For +// more information, see Encryption Context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) // in the Key Management Service Developer Guide. The KMS key that you use for this // operation must be in a compatible key state. For details, see Key states of KMS // keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in // the Key Management Service Developer Guide. Cross-account use: Yes. To perform // this operation with a KMS key in a different Amazon Web Services account, // specify the key ARN or alias ARN in the value of the KeyId parameter. Required -// permissions: kms:GenerateDataKeyWithoutPlaintext -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// permissions: kms:GenerateDataKeyWithoutPlaintext (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (key policy) Related operations: -// -// * Decrypt -// -// * Encrypt -// -// * GenerateDataKey -// -// * -// GenerateDataKeyPair -// -// * GenerateDataKeyPairWithoutPlaintext +// - Decrypt +// - Encrypt +// - GenerateDataKey +// - GenerateDataKeyPair +// - GenerateDataKeyPairWithoutPlaintext func (c *Client) GenerateDataKeyWithoutPlaintext(ctx context.Context, params *GenerateDataKeyWithoutPlaintextInput, optFns ...func(*Options)) (*GenerateDataKeyWithoutPlaintextOutput, error) { if params == nil { params = &GenerateDataKeyWithoutPlaintextInput{} @@ -83,48 +75,38 @@ type GenerateDataKeyWithoutPlaintextInput struct { // Specifies the symmetric encryption KMS key that encrypts the data key. You // cannot specify an asymmetric KMS key or a KMS key in a custom key store. To get - // the type and origin of your KMS key, use the DescribeKey operation. To specify a - // KMS key, use its key ID, key ARN, alias name, or alias ARN. When using an alias - // name, prefix it with "alias/". To specify a KMS key in a different Amazon Web - // Services account, you must use the key ARN or alias ARN. For example: - // - // * Key ID: - // 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: - // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // * - // Alias name: alias/ExampleAlias - // - // * Alias ARN: - // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias - // - // To get the key ID and key - // ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias - // ARN, use ListAliases. + // the type and origin of your KMS key, use the DescribeKey operation. To specify + // a KMS key, use its key ID, key ARN, alias name, or alias ARN. 
When using an + // alias name, prefix it with "alias/" . To specify a KMS key in a different Amazon + // Web Services account, you must use the key ARN or alias ARN. For example: + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // - Alias name: alias/ExampleAlias + // - Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey . To + // get the alias name and alias ARN, use ListAliases . // // This member is required. KeyId *string - // Specifies the encryption context that will be used when encrypting the data key. - // An encryption context is a collection of non-secret key-value pairs that - // represent additional authenticated data. When you use an encryption context to - // encrypt data, you must specify the same (an exact case-sensitive match) - // encryption context to decrypt the data. An encryption context is supported only - // on operations with symmetric encryption KMS keys. On operations with symmetric + // Specifies the encryption context that will be used when encrypting the data + // key. Do not include confidential or sensitive information in this field. This + // field may be displayed in plaintext in CloudTrail logs and other output. An + // encryption context is a collection of non-secret key-value pairs that represent + // additional authenticated data. When you use an encryption context to encrypt + // data, you must specify the same (an exact case-sensitive match) encryption + // context to decrypt the data. An encryption context is supported only on + // operations with symmetric encryption KMS keys. On operations with symmetric // encryption KMS keys, an encryption context is optional, but it is strongly - // recommended. For more information, see Encryption context - // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) + // recommended. For more information, see Encryption context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) // in the Key Management Service Developer Guide. EncryptionContext map[string]string // A list of grant tokens. Use a grant token when your permission to call this // operation comes from a new grant that has not yet achieved eventual consistency. - // For more information, see Grant token - // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Using a grant token - // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) + // For more information, see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) // in the Key Management Service Developer Guide. GrantTokens []string @@ -132,8 +114,8 @@ type GenerateDataKeyWithoutPlaintextInput struct { // AES_256 to generate a 256-bit symmetric key. KeySpec types.DataKeySpec - // The length of the data key in bytes. For example, use the value 64 to generate a - // 512-bit data key (64 bytes is 512 bits). For common key lengths (128-bit and + // The length of the data key in bytes. For example, use the value 64 to generate + // a 512-bit data key (64 bytes is 512 bits). For common key lengths (128-bit and // 256-bit symmetric keys), we recommend that you use the KeySpec field instead of // this one. 
NumberOfBytes *int32 @@ -147,9 +129,8 @@ type GenerateDataKeyWithoutPlaintextOutput struct { // CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. CiphertextBlob []byte - // The Amazon Resource Name (key ARN - // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) - // of the KMS key that encrypted the data key. + // The Amazon Resource Name ( key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN) + // ) of the KMS key that encrypted the data key. KeyId *string // Metadata pertaining to the operation's result. @@ -209,6 +190,9 @@ func (c *Client) addOperationGenerateDataKeyWithoutPlaintextMiddlewares(stack *m if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGenerateDataKeyWithoutPlaintext(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateMac.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateMac.go index 43b03293..f46a0539 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateMac.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateMac.go @@ -11,30 +11,27 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Generates a hash-based message authentication code (HMAC) for a message using an -// HMAC KMS key and a MAC algorithm that the key supports. HMAC KMS keys and the -// HMAC algorithms that KMS uses conform to industry standards defined in RFC 2104 -// (https://datatracker.ietf.org/doc/html/rfc2104). You can use value that -// GenerateMac returns in the VerifyMac operation to demonstrate that the original -// message has not changed. Also, because a secret key is used to create the hash, -// you can verify that the party that generated the hash has the required secret -// key. You can also use the raw result to implement HMAC-based algorithms such as -// key derivation functions. This operation is part of KMS support for HMAC KMS -// keys. For details, see HMAC keys in KMS -// (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html) in the Key -// Management Service Developer Guide . Best practices recommend that you limit the -// time during which any signing mechanism, including an HMAC, is effective. This -// deters an attack where the actor uses a signed message to establish validity -// repeatedly or long after the message is superseded. HMAC tags do not include a -// timestamp, but you can include a timestamp in the token or message to help you -// detect when its time to refresh the HMAC. The KMS key that you use for this -// operation must be in a compatible key state. For details, see Key states of KMS -// keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in -// the Key Management Service Developer Guide. Cross-account use: Yes. To perform -// this operation with a KMS key in a different Amazon Web Services account, -// specify the key ARN or alias ARN in the value of the KeyId parameter. Required -// permissions: kms:GenerateMac -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// Generates a hash-based message authentication code (HMAC) for a message using +// an HMAC KMS key and a MAC algorithm that the key supports. 
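// Illustrative sketch (not part of the vendored SDK): the
// GenerateDataKeyWithoutPlaintext pattern documented in the preceding operation.
// A component that only creates encrypted containers requests the encrypted data
// key; a component with kms:Decrypt permission recovers the plaintext later. The
// alias is a placeholder.
package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// newEncryptedDataKey returns only the encrypted copy of a 256-bit data key.
// Specify either KeySpec or NumberOfBytes, not both (for an SM4 data key in China
// Regions, NumberOfBytes would be 16).
func newEncryptedDataKey(ctx context.Context, client *kms.Client) ([]byte, error) {
	out, err := client.GenerateDataKeyWithoutPlaintext(ctx, &kms.GenerateDataKeyWithoutPlaintextInput{
		KeyId:   aws.String("alias/ExampleAlias"),
		KeySpec: types.DataKeySpecAes256,
	})
	if err != nil {
		return nil, err
	}
	return out.CiphertextBlob, nil
}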
HMAC KMS keys and the
+// HMAC algorithms that KMS uses conform to industry standards defined in RFC 2104 (https://datatracker.ietf.org/doc/html/rfc2104)
+// . You can use the value that GenerateMac returns in the VerifyMac operation to
+// demonstrate that the original message has not changed. Also, because a secret
+// key is used to create the hash, you can verify that the party that generated the
+// hash has the required secret key. You can also use the raw result to implement
+// HMAC-based algorithms such as key derivation functions. This operation is part
+// of KMS support for HMAC KMS keys. For details, see HMAC keys in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html)
+// in the Key Management Service Developer Guide . Best practices recommend that
+// you limit the time during which any signing mechanism, including an HMAC, is
+// effective. This deters an attack where the actor uses a signed message to
+// establish validity repeatedly or long after the message is superseded. HMAC tags
+// do not include a timestamp, but you can include a timestamp in the token or
+// message to help you detect when it's time to refresh the HMAC. The KMS key that
+// you use for this operation must be in a compatible key state. For details, see
+// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// in the Key Management Service Developer Guide. Cross-account use: Yes. To
+// perform this operation with a KMS key in a different Amazon Web Services
+// account, specify the key ARN or alias ARN in the value of the KeyId parameter.
+// Required permissions: kms:GenerateMac (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
// (key policy) Related operations: VerifyMac
func (c *Client) GenerateMac(ctx context.Context, params *GenerateMacInput, optFns ...func(*Options)) (*GenerateMacOutput, error) {
	if params == nil {
@@ -54,9 +51,9 @@ func (c *Client) GenerateMac(ctx context.Context, params *GenerateMacInput, optF
type GenerateMacInput struct {
	// The HMAC KMS key to use in the operation. The MAC algorithm computes the HMAC
- // for the message and the key as described in RFC 2104
- // (https://datatracker.ietf.org/doc/html/rfc2104). To identify an HMAC KMS key,
- // use the DescribeKey operation and see the KeySpec field in the response.
+ // for the message and the key as described in RFC 2104 (https://datatracker.ietf.org/doc/html/rfc2104)
+ // . To identify an HMAC KMS key, use the DescribeKey operation and see the KeySpec
+ // field in the response.
	//
	// This member is required.
	KeyId *string
@@ -79,10 +76,8 @@ type GenerateMacInput struct {
	// A list of grant tokens. Use a grant token when your permission to call this
	// operation comes from a new grant that has not yet achieved eventual consistency.
- // For more information, see Grant token
- // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token)
- // and Using a grant token
- // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token)
+ // For more information, see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token)
+ // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token)
	// in the Key Management Service Developer Guide.
GrantTokens []string @@ -96,7 +91,7 @@ type GenerateMacOutput struct { // The hash-based message authentication code (HMAC) that was generated for the // specified message, HMAC KMS key, and MAC algorithm. This is the standard, raw - // HMAC defined in RFC 2104 (https://datatracker.ietf.org/doc/html/rfc2104). + // HMAC defined in RFC 2104 (https://datatracker.ietf.org/doc/html/rfc2104) . Mac []byte // The MAC algorithm that was used to generate the HMAC. @@ -159,6 +154,9 @@ func (c *Client) addOperationGenerateMacMiddlewares(stack *middleware.Stack, opt if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGenerateMac(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateRandom.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateRandom.go index 7ed73007..9d226e43 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateRandom.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateRandom.go @@ -6,6 +6,7 @@ import ( "context" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -14,19 +15,23 @@ import ( // NumberOfBytes parameter to specify the length of the random byte string. There // is no default value for string length. By default, the random byte string is // generated in KMS. To generate the byte string in the CloudHSM cluster associated -// with an CloudHSM key store, use the CustomKeyStoreId parameter. Applications in -// Amazon Web Services Nitro Enclaves can call this operation by using the Amazon -// Web Services Nitro Enclaves Development Kit -// (https://github.com/aws/aws-nitro-enclaves-sdk-c). For information about the -// supporting parameters, see How Amazon Web Services Nitro Enclaves use KMS -// (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html) +// with an CloudHSM key store, use the CustomKeyStoreId parameter. GenerateRandom +// also supports Amazon Web Services Nitro Enclaves (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitro-enclave.html) +// , which provide an isolated compute environment in Amazon EC2. To call +// GenerateRandom for a Nitro enclave, use the Amazon Web Services Nitro Enclaves +// SDK (https://docs.aws.amazon.com/enclaves/latest/user/developing-applications.html#sdk) +// or any Amazon Web Services SDK. Use the Recipient parameter to provide the +// attestation document for the enclave. Instead of plaintext bytes, the response +// includes the plaintext bytes encrypted under the public key from the attestation +// document ( CiphertextForRecipient ).For information about the interaction +// between KMS and Amazon Web Services Nitro Enclaves, see How Amazon Web Services +// Nitro Enclaves uses KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html) // in the Key Management Service Developer Guide. For more information about // entropy and random number generation, see Key Management Service Cryptographic -// Details (https://docs.aws.amazon.com/kms/latest/cryptographic-details/). +// Details (https://docs.aws.amazon.com/kms/latest/cryptographic-details/) . 
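// Illustrative sketch (not part of the vendored SDK): computing and verifying an
// HMAC with the GenerateMac operation documented above and its VerifyMac
// counterpart. The key alias is a placeholder and must refer to an HMAC KMS key.
package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

func macRoundTrip(ctx context.Context, client *kms.Client, message []byte) error {
	gen, err := client.GenerateMac(ctx, &kms.GenerateMacInput{
		KeyId:        aws.String("alias/ExampleHmacKey"),
		Message:      message, // raw message, not a digest
		MacAlgorithm: types.MacAlgorithmSpecHmacSha256,
	})
	if err != nil {
		return err
	}

	// VerifyMac recomputes the HMAC with the same key and algorithm; an invalid
	// MAC is reported as a KMSInvalidMacException error.
	_, err = client.VerifyMac(ctx, &kms.VerifyMacInput{
		KeyId:        aws.String("alias/ExampleHmacKey"),
		Message:      message,
		Mac:          gen.Mac,
		MacAlgorithm: types.MacAlgorithmSpecHmacSha256,
	})
	return err
}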
// Cross-account use: Not applicable. GenerateRandom does not use any // account-specific resources, such as KMS keys. Required permissions: -// kms:GenerateRandom -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// kms:GenerateRandom (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (IAM policy) func (c *Client) GenerateRandom(ctx context.Context, params *GenerateRandomInput, optFns ...func(*Options)) (*GenerateRandomOutput, error) { if params == nil { @@ -45,23 +50,51 @@ func (c *Client) GenerateRandom(ctx context.Context, params *GenerateRandomInput type GenerateRandomInput struct { - // Generates the random byte string in the CloudHSM cluster that is associated with - // the specified CloudHSM key store. To find the ID of a custom key store, use the - // DescribeCustomKeyStores operation. External key store IDs are not valid for this - // parameter. If you specify the ID of an external key store, GenerateRandom throws - // an UnsupportedOperationException. + // Generates the random byte string in the CloudHSM cluster that is associated + // with the specified CloudHSM key store. To find the ID of a custom key store, use + // the DescribeCustomKeyStores operation. External key store IDs are not valid for + // this parameter. If you specify the ID of an external key store, GenerateRandom + // throws an UnsupportedOperationException . CustomKeyStoreId *string // The length of the random byte string. This parameter is required. NumberOfBytes *int32 + // A signed attestation document (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitro-enclave-how.html#term-attestdoc) + // from an Amazon Web Services Nitro enclave and the encryption algorithm to use + // with the enclave's public key. The only valid encryption algorithm is + // RSAES_OAEP_SHA_256 . This parameter only supports attestation documents for + // Amazon Web Services Nitro Enclaves. To include this parameter, use the Amazon + // Web Services Nitro Enclaves SDK (https://docs.aws.amazon.com/enclaves/latest/user/developing-applications.html#sdk) + // or any Amazon Web Services SDK. When you use this parameter, instead of + // returning plaintext bytes, KMS encrypts the plaintext bytes under the public key + // in the attestation document, and returns the resulting ciphertext in the + // CiphertextForRecipient field in the response. This ciphertext can be decrypted + // only with the private key in the enclave. The Plaintext field in the response + // is null or empty. For information about the interaction between KMS and Amazon + // Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html) + // in the Key Management Service Developer Guide. + Recipient *types.RecipientInfo + noSmithyDocumentSerde } type GenerateRandomOutput struct { + // The plaintext random bytes encrypted with the public key from the Nitro + // enclave. This ciphertext can be decrypted only by using a private key in the + // Nitro enclave. This field is included in the response only when the Recipient + // parameter in the request includes a valid attestation document from an Amazon + // Web Services Nitro enclave. 
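// Illustrative sketch (not part of the vendored SDK): requesting random bytes with
// GenerateRandom as documented above. NumberOfBytes is required; CustomKeyStoreId
// and Recipient are optional and omitted here.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

func main() {
	ctx := context.TODO()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := kms.NewFromConfig(cfg)

	out, err := client.GenerateRandom(ctx, &kms.GenerateRandomInput{
		NumberOfBytes: aws.Int32(32),
	})
	if err != nil {
		log.Fatal(err)
	}
	// Without a Recipient, the random bytes come back in Plaintext; with a valid
	// attestation document they would arrive only in CiphertextForRecipient.
	fmt.Printf("received %d random bytes\n", len(out.Plaintext))
}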
For information about the interaction between KMS + // and Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro + // Enclaves uses KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html) + // in the Key Management Service Developer Guide. + CiphertextForRecipient []byte + // The random byte string. When you use the HTTP API or the Amazon Web Services - // CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. + // CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. If the + // response includes the CiphertextForRecipient field, the Plaintext field is null + // or empty. Plaintext []byte // Metadata pertaining to the operation's result. @@ -118,6 +151,9 @@ func (c *Client) addOperationGenerateRandomMiddlewares(stack *middleware.Stack, if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGenerateRandom(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyPolicy.go index ca9e135b..ce8e8e87 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyPolicy.go @@ -12,8 +12,7 @@ import ( // Gets a key policy attached to the specified KMS key. Cross-account use: No. You // cannot perform this operation on a KMS key in a different Amazon Web Services -// account. Required permissions: kms:GetKeyPolicy -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// account. Required permissions: kms:GetKeyPolicy (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (key policy) Related operations: PutKeyPolicy func (c *Client) GetKeyPolicy(ctx context.Context, params *GetKeyPolicyInput, optFns ...func(*Options)) (*GetKeyPolicyOutput, error) { if params == nil { @@ -34,21 +33,16 @@ type GetKeyPolicyInput struct { // Gets the key policy for the specified KMS key. Specify the key ID or key ARN of // the KMS key. For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key - // ARN: - // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To - // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey . // // This member is required. KeyId *string - // Specifies the name of the key policy. The only valid name is default. To get the - // names of key policies, use ListKeyPolicies. + // Specifies the name of the key policy. The only valid name is default . To get + // the names of key policies, use ListKeyPolicies . // // This member is required. 
PolicyName *string @@ -118,6 +112,9 @@ func (c *Client) addOperationGetKeyPolicyMiddlewares(stack *middleware.Stack, op if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetKeyPolicy(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyRotationStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyRotationStatus.go index 0d8c4320..883ecb5b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyRotationStatus.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyRotationStatus.go @@ -11,61 +11,46 @@ import ( ) // Gets a Boolean value that indicates whether automatic rotation of the key -// material -// (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html) is -// enabled for the specified KMS key. When you enable automatic rotation for -// customer managed KMS keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk), -// KMS rotates the key material of the KMS key one year (approximately 365 days) +// material (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html) +// is enabled for the specified KMS key. When you enable automatic rotation for +// customer managed KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk) +// , KMS rotates the key material of the KMS key one year (approximately 365 days) // from the enable date and every year thereafter. You can monitor rotation of the // key material for your KMS keys in CloudTrail and Amazon CloudWatch. Automatic -// key rotation is supported only on symmetric encryption KMS keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#symmetric-cmks). -// You cannot enable automatic rotation of asymmetric KMS keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html), -// HMAC KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html), -// KMS keys with imported key material -// (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html), or -// KMS keys in a custom key store -// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html). -// To enable or disable automatic rotation of a set of related multi-Region keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate), -// set the property on the primary key.. You can enable (EnableKeyRotation) and -// disable automatic rotation (DisableKeyRotation) of the key material in customer -// managed KMS keys. Key material rotation of Amazon Web Services managed KMS keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk) +// key rotation is supported only on symmetric encryption KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#symmetric-cmks) +// . 
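// Illustrative sketch (not part of the vendored SDK): reading the default key
// policy with the GetKeyPolicy operation documented above. The key ID argument is
// a placeholder; "default" is currently the only valid policy name.
package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

func keyPolicyJSON(ctx context.Context, client *kms.Client, keyID string) (string, error) {
	out, err := client.GetKeyPolicy(ctx, &kms.GetKeyPolicyInput{
		KeyId:      aws.String(keyID), // key ID or key ARN
		PolicyName: aws.String("default"),
	})
	if err != nil {
		return "", err
	}
	return aws.ToString(out.Policy), nil
}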
You cannot enable automatic rotation of asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) +// , HMAC KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html) +// , KMS keys with imported key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) +// , or KMS keys in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) +// . To enable or disable automatic rotation of a set of related multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate) +// , set the property on the primary key.. You can enable ( EnableKeyRotation ) and +// disable automatic rotation ( DisableKeyRotation ) of the key material in +// customer managed KMS keys. Key material rotation of Amazon Web Services managed +// KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk) // is not configurable. KMS always rotates the key material in Amazon Web Services // managed KMS keys every year. The key rotation status for Amazon Web Services -// managed KMS keys is always true. In May 2022, KMS changed the rotation schedule +// managed KMS keys is always true . In May 2022, KMS changed the rotation schedule // for Amazon Web Services managed keys from every three years to every year. For -// details, see EnableKeyRotation. The KMS key that you use for this operation must -// be in a compatible key state. For details, see Key states of KMS keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the -// Key Management Service Developer Guide. +// details, see EnableKeyRotation . The KMS key that you use for this operation +// must be in a compatible key state. For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) +// in the Key Management Service Developer Guide. +// - Disabled: The key rotation status does not change when you disable a KMS +// key. However, while the KMS key is disabled, KMS does not rotate the key +// material. When you re-enable the KMS key, rotation resumes. If the key material +// in the re-enabled KMS key hasn't been rotated in one year, KMS rotates it +// immediately, and every year thereafter. If it's been less than a year since the +// key material in the re-enabled KMS key was rotated, the KMS key resumes its +// prior rotation schedule. +// - Pending deletion: While a KMS key is pending deletion, its key rotation +// status is false and KMS does not rotate the key material. If you cancel the +// deletion, the original key rotation status returns to true . // -// * Disabled: The key rotation status -// does not change when you disable a KMS key. However, while the KMS key is -// disabled, KMS does not rotate the key material. When you re-enable the KMS key, -// rotation resumes. If the key material in the re-enabled KMS key hasn't been -// rotated in one year, KMS rotates it immediately, and every year thereafter. If -// it's been less than a year since the key material in the re-enabled KMS key was -// rotated, the KMS key resumes its prior rotation schedule. -// -// * Pending deletion: -// While a KMS key is pending deletion, its key rotation status is false and KMS -// does not rotate the key material. If you cancel the deletion, the original key -// rotation status returns to true. -// -// Cross-account use: Yes. 
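// Illustrative sketch (not part of the vendored SDK): checking automatic rotation
// for a customer managed key with GetKeyRotationStatus as documented above. For a
// key in another Amazon Web Services account, pass the key ARN instead of a key ID.
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

func rotationEnabled(ctx context.Context, client *kms.Client, keyID string) (bool, error) {
	out, err := client.GetKeyRotationStatus(ctx, &kms.GetKeyRotationStatusInput{
		KeyId: aws.String(keyID),
	})
	if err != nil {
		return false, err
	}
	fmt.Printf("key rotation enabled for %s: %v\n", keyID, out.KeyRotationEnabled)
	return out.KeyRotationEnabled, nil
}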
To perform this -// operation on a KMS key in a different Amazon Web Services account, specify the -// key ARN in the value of the KeyId parameter. Required permissions: -// kms:GetKeyRotationStatus -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// Cross-account use: Yes. To perform this operation on a KMS key in a different +// Amazon Web Services account, specify the key ARN in the value of the KeyId +// parameter. Required permissions: kms:GetKeyRotationStatus (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (key policy) Related operations: -// -// * DisableKeyRotation -// -// * EnableKeyRotation +// - DisableKeyRotation +// - EnableKeyRotation func (c *Client) GetKeyRotationStatus(ctx context.Context, params *GetKeyRotationStatusInput, optFns ...func(*Options)) (*GetKeyRotationStatusOutput, error) { if params == nil { params = &GetKeyRotationStatusInput{} @@ -86,15 +71,10 @@ type GetKeyRotationStatusInput struct { // Gets the rotation status for the specified KMS key. Specify the key ID or key // ARN of the KMS key. To specify a KMS key in a different Amazon Web Services // account, you must use the key ARN. For example: - // - // * Key ID: - // 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: - // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To - // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey . // // This member is required. KeyId *string @@ -164,6 +144,9 @@ func (c *Client) addOperationGetKeyRotationStatusMiddlewares(stack *middleware.S if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetKeyRotationStatus(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetParametersForImport.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetParametersForImport.go index 36f6eb2d..f30722cf 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetParametersForImport.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetParametersForImport.go @@ -12,35 +12,51 @@ import ( "time" ) -// Returns the items you need to import key material into a symmetric encryption -// KMS key. For more information about importing key material into KMS, see -// Importing key material -// (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) in -// the Key Management Service Developer Guide. This operation returns a public key -// and an import token. Use the public key to encrypt the symmetric key material. -// Store the import token to send with a subsequent ImportKeyMaterial request. You -// must specify the key ID of the symmetric encryption KMS key into which you will -// import key material. The KMS key Origin must be EXTERNAL. You must also specify -// the wrapping algorithm and type of wrapping key (public key) that you will use -// to encrypt the key material. You cannot perform this operation on an asymmetric -// KMS key, an HMAC KMS key, or on any KMS key in a different Amazon Web Services -// account. 
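As a quick orientation to the GetKeyRotationStatus surface updated in the hunks above, a minimal caller sketch could look like the following. This is an illustrative sketch, not code from the vendored module: the key ARN is the placeholder ARN used in the documentation itself, and building the client through config.LoadDefaultConfig is an assumption about the caller's environment.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

func main() {
	ctx := context.Background()

	// Load credentials and Region from the default chain (env vars, shared config, IMDS).
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := kms.NewFromConfig(cfg)

	// Cross-account use is allowed for this operation, but only with the key ARN.
	out, err := client.GetKeyRotationStatus(ctx, &kms.GetKeyRotationStatusInput{
		KeyId: aws.String("arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("automatic rotation enabled:", out.KeyRotationEnabled)
}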
To import key material, you must use the public key and import token -// from the same response. These items are valid for 24 hours. The expiration date -// and time appear in the GetParametersForImport response. You cannot use an -// expired token in an ImportKeyMaterial request. If your key and token expire, -// send another GetParametersForImport request. The KMS key that you use for this -// operation must be in a compatible key state. For details, see Key states of KMS -// keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in -// the Key Management Service Developer Guide. Cross-account use: No. You cannot -// perform this operation on a KMS key in a different Amazon Web Services account. -// Required permissions: kms:GetParametersForImport -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) Related operations: +// Returns the public key and an import token you need to import or reimport key +// material for a KMS key. By default, KMS keys are created with key material that +// KMS generates. This operation supports Importing key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) +// , an advanced feature that lets you generate and import the cryptographic key +// material for a KMS key. For more information about importing key material into +// KMS, see Importing key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) +// in the Key Management Service Developer Guide. Before calling +// GetParametersForImport , use the CreateKey operation with an Origin value of +// EXTERNAL to create a KMS key with no key material. You can import key material +// for a symmetric encryption KMS key, HMAC KMS key, asymmetric encryption KMS key, +// or asymmetric signing KMS key. You can also import key material into a +// multi-Region key of any supported type. However, you can't import key material +// into a KMS key in a custom key store . You can also use GetParametersForImport +// to get a public key and import token to reimport the original key material into +// a KMS key whose key material expired or was deleted. GetParametersForImport +// returns the items that you need to import your key material. +// - The public key (or "wrapping key") of an RSA key pair that KMS generates. +// You will use this public key to encrypt ("wrap") your key material while it's in +// transit to KMS. +// - A import token that ensures that KMS can decrypt your key material and +// associate it with the correct KMS key. // -// * ImportKeyMaterial +// The public key and its import token are permanently linked and must be used +// together. Each public key and import token set is valid for 24 hours. The +// expiration date and time appear in the ParametersValidTo field in the +// GetParametersForImport response. You cannot use an expired public key or import +// token in an ImportKeyMaterial request. If your key and token expire, send +// another GetParametersForImport request. GetParametersForImport requires the +// following information: +// - The key ID of the KMS key for which you are importing the key material. +// - The key spec of the public key ("wrapping key") that you will use to +// encrypt your key material during import. +// - The wrapping algorithm that you will use with the public key to encrypt +// your key material. 
// -// * -// DeleteImportedKeyMaterial +// You can use the same or a different public key spec and wrapping algorithm each +// time you import or reimport the same key material. The KMS key that you use for +// this operation must be in a compatible key state. For details, see Key states +// of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) +// in the Key Management Service Developer Guide. Cross-account use: No. You cannot +// perform this operation on a KMS key in a different Amazon Web Services account. +// Required permissions: kms:GetParametersForImport (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: +// - ImportKeyMaterial +// - DeleteImportedKeyMaterial func (c *Client) GetParametersForImport(ctx context.Context, params *GetParametersForImportInput, optFns ...func(*Options)) (*GetParametersForImportOutput, error) { if params == nil { params = &GetParametersForImportInput{} @@ -58,32 +74,48 @@ func (c *Client) GetParametersForImport(ctx context.Context, params *GetParamete type GetParametersForImportInput struct { - // The identifier of the symmetric encryption KMS key into which you will import - // key material. The Origin of the KMS key must be EXTERNAL. Specify the key ID or - // key ARN of the KMS key. For example: - // - // * Key ID: - // 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: - // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To - // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // The identifier of the KMS key that will be associated with the imported key + // material. The Origin of the KMS key must be EXTERNAL . All KMS key types are + // supported, including multi-Region keys. However, you cannot import key material + // into a KMS key in a custom key store. Specify the key ID or key ARN of the KMS + // key. For example: + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey . // // This member is required. KeyId *string - // The algorithm you will use to encrypt the key material before importing it with - // ImportKeyMaterial. For more information, see Encrypt the Key Material - // (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys-encrypt-key-material.html) - // in the Key Management Service Developer Guide. + // The algorithm you will use with the RSA public key ( PublicKey ) in the response + // to protect your key material during import. For more information, see Select a + // wrapping algorithm in the Key Management Service Developer Guide. For RSA_AES + // wrapping algorithms, you encrypt your key material with an AES key that you + // generate, then encrypt your AES key with the RSA public key from KMS. For RSAES + // wrapping algorithms, you encrypt your key material directly with the RSA public + // key from KMS. The wrapping algorithms that you can use depend on the type of key + // material that you are importing. To import an RSA private key, you must use an + // RSA_AES wrapping algorithm. + // - RSA_AES_KEY_WRAP_SHA_256 — Supported for wrapping RSA and ECC key material. + // - RSA_AES_KEY_WRAP_SHA_1 — Supported for wrapping RSA and ECC key material. + // - RSAES_OAEP_SHA_256 — Supported for all types of key material, except RSA + // key material (private key). 
You cannot use the RSAES_OAEP_SHA_256 wrapping + // algorithm with the RSA_2048 wrapping key spec to wrap ECC_NIST_P521 key + // material. + // - RSAES_OAEP_SHA_1 — Supported for all types of key material, except RSA key + // material (private key). You cannot use the RSAES_OAEP_SHA_1 wrapping algorithm + // with the RSA_2048 wrapping key spec to wrap ECC_NIST_P521 key material. + // - RSAES_PKCS1_V1_5 (Deprecated) — Supported only for symmetric encryption key + // material (and only in legacy mode). // // This member is required. WrappingAlgorithm types.AlgorithmSpec - // The type of wrapping key (public key) to return in the response. Only 2048-bit - // RSA public keys are supported. + // The type of RSA public key to return in the response. You will use this + // wrapping key with the specified wrapping algorithm to protect your key material + // during import. Use the longest RSA wrapping key that is practical. You cannot + // use an RSA_2048 public key to directly wrap an ECC_NIST_P521 private key. + // Instead, use an RSA_AES wrapping algorithm or choose a longer RSA public key. // // This member is required. WrappingKeySpec types.WrappingKeySpec @@ -96,19 +128,18 @@ type GetParametersForImportOutput struct { // The import token to send in a subsequent ImportKeyMaterial request. ImportToken []byte - // The Amazon Resource Name (key ARN - // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) - // of the KMS key to use in a subsequent ImportKeyMaterial request. This is the + // The Amazon Resource Name ( key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN) + // ) of the KMS key to use in a subsequent ImportKeyMaterial request. This is the // same KMS key specified in the GetParametersForImport request. KeyId *string // The time at which the import token and public key are no longer valid. After - // this time, you cannot use them to make an ImportKeyMaterial request and you must - // send another GetParametersForImport request to get new ones. + // this time, you cannot use them to make an ImportKeyMaterial request and you + // must send another GetParametersForImport request to get new ones. ParametersValidTo *time.Time // The public key to use to encrypt the key material before importing it with - // ImportKeyMaterial. + // ImportKeyMaterial . PublicKey []byte // Metadata pertaining to the operation's result. @@ -168,6 +199,9 @@ func (c *Client) addOperationGetParametersForImportMiddlewares(stack *middleware if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetParametersForImport(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetPublicKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetPublicKey.go index cd093fad..732841e6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetPublicKey.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetPublicKey.go @@ -16,52 +16,40 @@ import ( // kms:GetPublicKey permission can download the public key of an asymmetric KMS // key. You can share the public key to allow others to encrypt messages and verify // signatures outside of KMS. 
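The GetParametersForImport input and output shapes shown above suggest a small helper like the one below. It is a sketch under stated assumptions: the enum constant names follow the generated types package naming, and the RSAES_OAEP_SHA_256 / RSA_2048 combination is chosen because it suits symmetric key material; RSA private keys would need an RSA_AES wrapping algorithm instead, per the list above.

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// fetchImportParameters retrieves the public wrapping key and import token for a
// KMS key whose Origin is EXTERNAL, using RSAES_OAEP_SHA_256 with an RSA_2048
// wrapping key (adequate for symmetric key material).
func fetchImportParameters(ctx context.Context, client *kms.Client, keyID string) (*kms.GetParametersForImportOutput, error) {
	out, err := client.GetParametersForImport(ctx, &kms.GetParametersForImportInput{
		KeyId:             aws.String(keyID),
		WrappingAlgorithm: types.AlgorithmSpecRsaesOaepSha256,
		WrappingKeySpec:   types.WrappingKeySpecRsa2048,
	})
	if err != nil {
		return nil, err
	}
	// The public key and import token must be used together and expire after
	// roughly 24 hours; ParametersValidTo carries the exact deadline.
	fmt.Println("import parameters valid until:", out.ParametersValidTo)
	return out, nil
}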
For information about asymmetric KMS keys, see -// Asymmetric KMS keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) +// Asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) // in the Key Management Service Developer Guide. You do not need to download the // public key. Instead, you can use the public key within KMS by calling the -// Encrypt, ReEncrypt, or Verify operations with the identifier of an asymmetric +// Encrypt , ReEncrypt , or Verify operations with the identifier of an asymmetric // KMS key. When you use the public key within KMS, you benefit from the // authentication, authorization, and logging that are part of every KMS operation. // You also reduce of risk of encrypting data that cannot be decrypted. These // features are not effective outside of KMS. To help you use the public key safely // outside of KMS, GetPublicKey returns important information about the public key // in the response, including: +// - KeySpec (https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-KeySpec) +// : The type of key material in the public key, such as RSA_4096 or +// ECC_NIST_P521 . +// - KeyUsage (https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-KeyUsage) +// : Whether the key is used for encryption or signing. +// - EncryptionAlgorithms (https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-EncryptionAlgorithms) +// or SigningAlgorithms (https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-SigningAlgorithms) +// : A list of the encryption algorithms or the signing algorithms for the key. // -// * KeySpec -// (https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-KeySpec): -// The type of key material in the public key, such as RSA_4096 or -// ECC_NIST_P521. -// -// * KeyUsage -// (https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-KeyUsage): -// Whether the key is used for encryption or signing. -// -// * EncryptionAlgorithms -// (https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-EncryptionAlgorithms) -// or SigningAlgorithms -// (https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-SigningAlgorithms): -// A list of the encryption algorithms or the signing algorithms for the -// key. -// -// Although KMS cannot enforce these restrictions on external operations, it -// is crucial that you use this information to prevent the public key from being -// used improperly. For example, you can prevent a public signing key from being -// used encrypt data, or prevent a public key from being used with an encryption +// Although KMS cannot enforce these restrictions on external operations, it is +// crucial that you use this information to prevent the public key from being used +// improperly. For example, you can prevent a public signing key from being used +// encrypt data, or prevent a public key from being used with an encryption // algorithm that is not supported by KMS. You can also avoid errors, such as using // the wrong signing algorithm in a verification operation. To verify a signature // outside of KMS with an SM2 public key (China Regions only), you must specify the // distinguishing ID. 
By default, KMS uses 1234567812345678 as the distinguishing -// ID. For more information, see Offline verification with SM2 key pairs -// (https://docs.aws.amazon.com/kms/latest/developerguide/asymmetric-key-specs.html#key-spec-sm-offline-verification). -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key states of KMS keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the -// Key Management Service Developer Guide. Cross-account use: Yes. To perform this -// operation with a KMS key in a different Amazon Web Services account, specify the -// key ARN or alias ARN in the value of the KeyId parameter. Required permissions: -// kms:GetPublicKey -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// ID. For more information, see Offline verification with SM2 key pairs (https://docs.aws.amazon.com/kms/latest/developerguide/asymmetric-key-specs.html#key-spec-sm-offline-verification) +// . The KMS key that you use for this operation must be in a compatible key state. +// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) +// in the Key Management Service Developer Guide. Cross-account use: Yes. To +// perform this operation with a KMS key in a different Amazon Web Services +// account, specify the key ARN or alias ARN in the value of the KeyId parameter. +// Required permissions: kms:GetPublicKey (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (key policy) Related operations: CreateKey func (c *Client) GetPublicKey(ctx context.Context, params *GetPublicKeyInput, optFns ...func(*Options)) (*GetPublicKeyOutput, error) { if params == nil { @@ -80,36 +68,25 @@ func (c *Client) GetPublicKey(ctx context.Context, params *GetPublicKeyInput, op type GetPublicKeyInput struct { - // Identifies the asymmetric KMS key that includes the public key. To specify a KMS - // key, use its key ID, key ARN, alias name, or alias ARN. When using an alias - // name, prefix it with "alias/". To specify a KMS key in a different Amazon Web + // Identifies the asymmetric KMS key that includes the public key. To specify a + // KMS key, use its key ID, key ARN, alias name, or alias ARN. When using an alias + // name, prefix it with "alias/" . To specify a KMS key in a different Amazon Web // Services account, you must use the key ARN or alias ARN. For example: - // - // * Key ID: - // 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: - // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // * - // Alias name: alias/ExampleAlias - // - // * Alias ARN: - // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias - // - // To get the key ID and key - // ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias - // ARN, use ListAliases. + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // - Alias name: alias/ExampleAlias + // - Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey . To + // get the alias name and alias ARN, use ListAliases . // // This member is required. KeyId *string // A list of grant tokens. Use a grant token when your permission to call this // operation comes from a new grant that has not yet achieved eventual consistency. 
- // For more information, see Grant token - // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Using a grant token - // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) + // For more information, see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) // in the Key Management Service Developer Guide. GrantTokens []string @@ -129,31 +106,29 @@ type GetPublicKeyOutput struct { // The encryption algorithms that KMS supports for this key. This information is // critical. If a public key encrypts data outside of KMS by using an unsupported // encryption algorithm, the ciphertext cannot be decrypted. This field appears in - // the response only when the KeyUsage of the public key is ENCRYPT_DECRYPT. + // the response only when the KeyUsage of the public key is ENCRYPT_DECRYPT . EncryptionAlgorithms []types.EncryptionAlgorithmSpec - // The Amazon Resource Name (key ARN - // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) - // of the asymmetric KMS key from which the public key was downloaded. + // The Amazon Resource Name ( key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN) + // ) of the asymmetric KMS key from which the public key was downloaded. KeyId *string // The type of the of the public key that was downloaded. KeySpec types.KeySpec // The permitted use of the public key. Valid values are ENCRYPT_DECRYPT or - // SIGN_VERIFY. This information is critical. If a public key with SIGN_VERIFY key - // usage encrypts data outside of KMS, the ciphertext cannot be decrypted. + // SIGN_VERIFY . This information is critical. If a public key with SIGN_VERIFY + // key usage encrypts data outside of KMS, the ciphertext cannot be decrypted. KeyUsage types.KeyUsageType - // The exported public key. The value is a DER-encoded X.509 public key, also known - // as SubjectPublicKeyInfo (SPKI), as defined in RFC 5280 - // (https://tools.ietf.org/html/rfc5280). When you use the HTTP API or the Amazon - // Web Services CLI, the value is Base64-encoded. Otherwise, it is not - // Base64-encoded. + // The exported public key. The value is a DER-encoded X.509 public key, also + // known as SubjectPublicKeyInfo (SPKI), as defined in RFC 5280 (https://tools.ietf.org/html/rfc5280) + // . When you use the HTTP API or the Amazon Web Services CLI, the value is + // Base64-encoded. Otherwise, it is not Base64-encoded. PublicKey []byte - // The signing algorithms that KMS supports for this key. This field appears in the - // response only when the KeyUsage of the public key is SIGN_VERIFY. + // The signing algorithms that KMS supports for this key. This field appears in + // the response only when the KeyUsage of the public key is SIGN_VERIFY . SigningAlgorithms []types.SigningAlgorithmSpec // Metadata pertaining to the operation's result. 
@@ -213,6 +188,9 @@ func (c *Client) addOperationGetPublicKeyMiddlewares(stack *middleware.Stack, op if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetPublicKey(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ImportKeyMaterial.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ImportKeyMaterial.go index 17f45760..97423aa7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ImportKeyMaterial.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ImportKeyMaterial.go @@ -12,63 +12,79 @@ import ( "time" ) -// Imports key material into an existing symmetric encryption KMS key that was -// created without key material. After you successfully import key material into a -// KMS key, you can reimport the same key material -// (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html#reimport-key-material) -// into that KMS key, but you cannot import different key material. You cannot -// perform this operation on an asymmetric KMS key, an HMAC KMS key, or on any KMS -// key in a different Amazon Web Services account. For more information about -// creating KMS keys with no key material and then importing key material, see -// Importing Key Material -// (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) in -// the Key Management Service Developer Guide. Before using this operation, call -// GetParametersForImport. Its response includes a public key and an import token. -// Use the public key to encrypt the key material. Then, submit the import token -// from the same GetParametersForImport response. When calling this operation, you -// must specify the following values: +// Imports or reimports key material into an existing KMS key that was created +// without key material. ImportKeyMaterial also sets the expiration model and +// expiration date of the imported key material. By default, KMS keys are created +// with key material that KMS generates. This operation supports Importing key +// material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) +// , an advanced feature that lets you generate and import the cryptographic key +// material for a KMS key. For more information about importing key material into +// KMS, see Importing key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) +// in the Key Management Service Developer Guide. After you successfully import key +// material into a KMS key, you can reimport the same key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html#reimport-key-material) +// into that KMS key, but you cannot import different key material. You might +// reimport key material to replace key material that expired or key material that +// you deleted. You might also reimport key material to change the expiration model +// or expiration date of the key material. Before reimporting key material, if +// necessary, call DeleteImportedKeyMaterial to delete the current imported key +// material. Each time you import key material into KMS, you can determine whether +// ( ExpirationModel ) and when ( ValidTo ) the key material expires. 
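The GetPublicKey hunks above stress checking KeySpec and KeyUsage before using a downloaded key outside KMS. A minimal sketch of that check, assuming the caller wants a verification key and parses the DER-encoded SubjectPublicKeyInfo with the standard library:

package main

import (
	"context"
	"crypto/x509"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// downloadVerificationKey fetches the public half of an asymmetric KMS key and
// refuses to use it for anything except signature verification, following the
// KeyUsage guidance documented above.
func downloadVerificationKey(ctx context.Context, client *kms.Client, keyID string) (any, error) {
	out, err := client.GetPublicKey(ctx, &kms.GetPublicKeyInput{KeyId: aws.String(keyID)})
	if err != nil {
		return nil, err
	}
	if out.KeyUsage != types.KeyUsageTypeSignVerify {
		return nil, fmt.Errorf("key %s has usage %s, not SIGN_VERIFY", keyID, out.KeyUsage)
	}
	// PublicKey is DER-encoded SubjectPublicKeyInfo (RFC 5280), so the standard
	// library can parse it directly.
	return x509.ParsePKIXPublicKey(out.PublicKey)
}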
To change the +// expiration of your key material, you must import it again, either by calling +// ImportKeyMaterial or using the import features of the KMS console. Before +// calling ImportKeyMaterial : +// - Create or identify a KMS key with no key material. The KMS key must have an +// Origin value of EXTERNAL , which indicates that the KMS key is designed for +// imported key material. To create an new KMS key for imported key material, call +// the CreateKey operation with an Origin value of EXTERNAL . You can create a +// symmetric encryption KMS key, HMAC KMS key, asymmetric encryption KMS key, or +// asymmetric signing KMS key. You can also import key material into a +// multi-Region key of any supported type. However, you can't import key material +// into a KMS key in a custom key store . +// - Use the DescribeKey operation to verify that the KeyState of the KMS key is +// PendingImport , which indicates that the KMS key has no key material. If you +// are reimporting the same key material into an existing KMS key, you might need +// to call the DeleteImportedKeyMaterial to delete its existing key material. +// - Call the GetParametersForImport operation to get a public key and import +// token set for importing key material. +// - Use the public key in the GetParametersForImport response to encrypt your +// key material. // -// * The key ID or key ARN of a KMS key with no -// key material. Its Origin must be EXTERNAL. To create a KMS key with no key -// material, call CreateKey and set the value of its Origin parameter to EXTERNAL. -// To get the Origin of a KMS key, call DescribeKey.) +// Then, in an ImportKeyMaterial request, you submit your encrypted key material +// and import token. When calling this operation, you must specify the following +// values: +// - The key ID or key ARN of the KMS key to associate with the imported key +// material. Its Origin must be EXTERNAL and its KeyState must be PendingImport . +// You cannot perform this operation on a KMS key in a custom key store , or on a +// KMS key in a different Amazon Web Services account. To get the Origin and +// KeyState of a KMS key, call DescribeKey . +// - The encrypted key material. +// - The import token that GetParametersForImport returned. You must use a public +// key and token from the same GetParametersForImport response. +// - Whether the key material expires ( ExpirationModel ) and, if so, when ( +// ValidTo ). For help with this choice, see Setting an expiration time (https://docs.aws.amazon.com/en_us/kms/latest/developerguide/importing-keys.html#importing-keys-expiration) +// in the Key Management Service Developer Guide. If you set an expiration date, +// KMS deletes the key material from the KMS key on the specified date, making the +// KMS key unusable. To use the KMS key in cryptographic operations again, you must +// reimport the same key material. However, you can delete and reimport the key +// material at any time, including before the key material expires. Each time you +// reimport, you can eliminate or reset the expiration time. // -// * The encrypted key -// material. To get the public key to encrypt the key material, call -// GetParametersForImport. -// -// * The import token that GetParametersForImport -// returned. You must use a public key and token from the same -// GetParametersForImport response. -// -// * Whether the key material expires -// (ExpirationModel) and, if so, when (ValidTo). 
If you set an expiration date, on -// the specified date, KMS deletes the key material from the KMS key, making the -// KMS key unusable. To use the KMS key in cryptographic operations again, you must -// reimport the same key material. The only way to change the expiration model or -// expiration date is by reimporting the same key material and specifying a new -// expiration date. -// -// When this operation is successful, the key state of the KMS -// key changes from PendingImport to Enabled, and you can use the KMS key. If this -// operation fails, use the exception to help determine the problem. If the error -// is related to the key material, the import token, or wrapping key, use -// GetParametersForImport to get a new public key and import token for the KMS key -// and repeat the import procedure. For help, see How To Import Key Material -// (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html#importing-keys-overview) +// When this operation is successful, the key state of the KMS key changes from +// PendingImport to Enabled , and you can use the KMS key in cryptographic +// operations. If this operation fails, use the exception to help determine the +// problem. If the error is related to the key material, the import token, or +// wrapping key, use GetParametersForImport to get a new public key and import +// token for the KMS key and repeat the import procedure. For help, see How To +// Import Key Material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html#importing-keys-overview) // in the Key Management Service Developer Guide. The KMS key that you use for this // operation must be in a compatible key state. For details, see Key states of KMS // keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in // the Key Management Service Developer Guide. Cross-account use: No. You cannot // perform this operation on a KMS key in a different Amazon Web Services account. -// Required permissions: kms:ImportKeyMaterial -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// Required permissions: kms:ImportKeyMaterial (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (key policy) Related operations: -// -// * DeleteImportedKeyMaterial -// -// * -// GetParametersForImport +// - DeleteImportedKeyMaterial +// - GetParametersForImport func (c *Client) ImportKeyMaterial(ctx context.Context, params *ImportKeyMaterialInput, optFns ...func(*Options)) (*ImportKeyMaterialOutput, error) { if params == nil { params = &ImportKeyMaterialInput{} @@ -86,9 +102,10 @@ func (c *Client) ImportKeyMaterial(ctx context.Context, params *ImportKeyMateria type ImportKeyMaterialInput struct { - // The encrypted key material to import. The key material must be encrypted with - // the public wrapping key that GetParametersForImport returned, using the wrapping - // algorithm that you specified in the same GetParametersForImport request. + // The encrypted key material to import. The key material must be encrypted under + // the public wrapping key that GetParametersForImport returned, using the + // wrapping algorithm that you specified in the same GetParametersForImport + // request. // // This member is required. EncryptedKeyMaterial []byte @@ -100,44 +117,43 @@ type ImportKeyMaterialInput struct { // This member is required. ImportToken []byte - // The identifier of the symmetric encryption KMS key that receives the imported - // key material. 
This must be the same KMS key specified in the KeyID parameter of - // the corresponding GetParametersForImport request. The Origin of the KMS key must - // be EXTERNAL. You cannot perform this operation on an asymmetric KMS key, an HMAC - // KMS key, a KMS key in a custom key store, or on a KMS key in a different Amazon - // Web Services account Specify the key ID or key ARN of the KMS key. For - // example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: - // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To - // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // The identifier of the KMS key that will be associated with the imported key + // material. This must be the same KMS key specified in the KeyID parameter of the + // corresponding GetParametersForImport request. The Origin of the KMS key must be + // EXTERNAL and its KeyState must be PendingImport . The KMS key can be a symmetric + // encryption KMS key, HMAC KMS key, asymmetric encryption KMS key, or asymmetric + // signing KMS key, including a multi-Region key of any supported type. You cannot + // perform this operation on a KMS key in a custom key store, or on a KMS key in a + // different Amazon Web Services account. Specify the key ID or key ARN of the KMS + // key. For example: + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey . // // This member is required. KeyId *string - // Specifies whether the key material expires. The default is KEY_MATERIAL_EXPIRES. - // When the value of ExpirationModel is KEY_MATERIAL_EXPIRES, you must specify a - // value for the ValidTo parameter. When value is KEY_MATERIAL_DOES_NOT_EXPIRE, you - // must omit the ValidTo parameter. You cannot change the ExpirationModel or - // ValidTo values for the current import after the request completes. To change - // either value, you must delete (DeleteImportedKeyMaterial) and reimport the key - // material. + // Specifies whether the key material expires. The default is KEY_MATERIAL_EXPIRES + // . For help with this choice, see Setting an expiration time (https://docs.aws.amazon.com/en_us/kms/latest/developerguide/importing-keys.html#importing-keys-expiration) + // in the Key Management Service Developer Guide. When the value of ExpirationModel + // is KEY_MATERIAL_EXPIRES , you must specify a value for the ValidTo parameter. + // When value is KEY_MATERIAL_DOES_NOT_EXPIRE , you must omit the ValidTo + // parameter. You cannot change the ExpirationModel or ValidTo values for the + // current import after the request completes. To change either value, you must + // reimport the key material. ExpirationModel types.ExpirationModelType // The date and time when the imported key material expires. This parameter is - // required when the value of the ExpirationModel parameter is - // KEY_MATERIAL_EXPIRES. Otherwise it is not valid. The value of this parameter - // must be a future date and time. The maximum value is 365 days from the request - // date. When the key material expires, KMS deletes the key material from the KMS - // key. Without its key material, the KMS key is unusable. To use the KMS key in - // cryptographic operations, you must reimport the same key material. You cannot - // change the ExpirationModel or ValidTo values for the current import after the - // request completes. 
To change either value, you must delete - // (DeleteImportedKeyMaterial) and reimport the key material. + // required when the value of the ExpirationModel parameter is KEY_MATERIAL_EXPIRES + // . Otherwise it is not valid. The value of this parameter must be a future date + // and time. The maximum value is 365 days from the request date. When the key + // material expires, KMS deletes the key material from the KMS key. Without its key + // material, the KMS key is unusable. To use the KMS key in cryptographic + // operations, you must reimport the same key material. You cannot change the + // ExpirationModel or ValidTo values for the current import after the request + // completes. To change either value, you must delete ( DeleteImportedKeyMaterial ) + // and reimport the key material. ValidTo *time.Time noSmithyDocumentSerde @@ -201,6 +217,9 @@ func (c *Client) addOperationImportKeyMaterialMiddlewares(stack *middleware.Stac if err = stack.Initialize.Add(newServiceMetadataMiddleware_opImportKeyMaterial(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListAliases.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListAliases.go index 10f93450..d5c85d92 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListAliases.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListAliases.go @@ -13,32 +13,25 @@ import ( ) // Gets a list of aliases in the caller's Amazon Web Services account and region. -// For more information about aliases, see CreateAlias. By default, the ListAliases -// operation returns all aliases in the account and region. To get only the aliases -// associated with a particular KMS key, use the KeyId parameter. The ListAliases -// response can include aliases that you created and associated with your customer -// managed keys, and aliases that Amazon Web Services created and associated with -// Amazon Web Services managed keys in your account. You can recognize Amazon Web -// Services aliases because their names have the format aws/, such as aws/dynamodb. -// The response might also include aliases that have no TargetKeyId field. These -// are predefined aliases that Amazon Web Services has created but has not yet -// associated with a KMS key. Aliases that Amazon Web Services creates in your -// account, including predefined aliases, do not count against your KMS aliases -// quota -// (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html#aliases-limit). -// Cross-account use: No. ListAliases does not return aliases in other Amazon Web -// Services accounts. Required permissions: kms:ListAliases -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (IAM policy) For details, see Controlling access to aliases -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html#alias-access) +// For more information about aliases, see CreateAlias . By default, the +// ListAliases operation returns all aliases in the account and region. To get only +// the aliases associated with a particular KMS key, use the KeyId parameter. 
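Tying the ImportKeyMaterial documentation above to the GetParametersForImport sketch earlier: the fragment below wraps raw symmetric key material under the returned RSA public key and submits it with the matching import token. It is a sketch, not the SDK's prescribed flow; it assumes the returned PublicKey is DER-encoded SubjectPublicKeyInfo and that RSAES_OAEP_SHA_256 was the wrapping algorithm requested, which maps to OAEP with SHA-256 in crypto/rsa.

package main

import (
	"context"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"crypto/x509"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// importSymmetricKeyMaterial wraps key material (for example, a 32-byte AES key)
// under the RSA wrapping key from GetParametersForImport and imports it without
// an expiration date. Use KEY_MATERIAL_EXPIRES plus ValidTo to set a deadline.
func importSymmetricKeyMaterial(ctx context.Context, client *kms.Client, keyID string, params *kms.GetParametersForImportOutput, keyMaterial []byte) error {
	parsed, err := x509.ParsePKIXPublicKey(params.PublicKey)
	if err != nil {
		return err
	}
	rsaPub, ok := parsed.(*rsa.PublicKey)
	if !ok {
		return fmt.Errorf("expected an RSA wrapping key, got %T", parsed)
	}
	wrapped, err := rsa.EncryptOAEP(sha256.New(), rand.Reader, rsaPub, keyMaterial, nil)
	if err != nil {
		return err
	}
	_, err = client.ImportKeyMaterial(ctx, &kms.ImportKeyMaterialInput{
		KeyId:                aws.String(keyID),
		EncryptedKeyMaterial: wrapped,
		ImportToken:          params.ImportToken,
		ExpirationModel:      types.ExpirationModelTypeKeyMaterialDoesNotExpire,
	})
	return err
}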
The +// ListAliases response can include aliases that you created and associated with +// your customer managed keys, and aliases that Amazon Web Services created and +// associated with Amazon Web Services managed keys in your account. You can +// recognize Amazon Web Services aliases because their names have the format aws/ , +// such as aws/dynamodb . The response might also include aliases that have no +// TargetKeyId field. These are predefined aliases that Amazon Web Services has +// created but has not yet associated with a KMS key. Aliases that Amazon Web +// Services creates in your account, including predefined aliases, do not count +// against your KMS aliases quota (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html#aliases-limit) +// . Cross-account use: No. ListAliases does not return aliases in other Amazon +// Web Services accounts. Required permissions: kms:ListAliases (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (IAM policy) For details, see Controlling access to aliases (https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html#alias-access) // in the Key Management Service Developer Guide. Related operations: -// -// * -// CreateAlias -// -// * DeleteAlias -// -// * UpdateAlias +// - CreateAlias +// - DeleteAlias +// - UpdateAlias func (c *Client) ListAliases(ctx context.Context, params *ListAliasesInput, optFns ...func(*Options)) (*ListAliasesOutput, error) { if params == nil { params = &ListAliasesInput{} @@ -60,15 +53,10 @@ type ListAliasesInput struct { // key in your Amazon Web Services account. This parameter is optional. If you omit // it, ListAliases returns all aliases in the account and Region. Specify the key // ID or key ARN of the KMS key. For example: - // - // * Key ID: - // 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: - // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To - // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey . KeyId *string // Use this parameter to specify the maximum number of items to return. When this @@ -79,8 +67,8 @@ type ListAliasesInput struct { Limit *int32 // Use this parameter in a subsequent request after you receive a response with - // truncated results. Set it to the value of NextMarker from the truncated response - // you just received. + // truncated results. Set it to the value of NextMarker from the truncated + // response you just received. 
Marker *string noSmithyDocumentSerde @@ -155,6 +143,9 @@ func (c *Client) addOperationListAliasesMiddlewares(stack *middleware.Stack, opt if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListAliases(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListGrants.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListGrants.go index 21b11e12..23c4bf43 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListGrants.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListGrants.go @@ -15,31 +15,22 @@ import ( // Gets a list of all grants for the specified KMS key. You must specify the KMS // key in all requests. You can filter the grant list by grant ID or grantee // principal. For detailed information about grants, including grant terminology, -// see Grants in KMS -// (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html) in the Key -// Management Service Developer Guide . For examples of working with grants in -// several programming languages, see Programming grants -// (https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html). -// The GranteePrincipal field in the ListGrants response usually contains the user -// or role designated as the grantee principal in the grant. However, when the +// see Grants in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html) +// in the Key Management Service Developer Guide . For examples of working with +// grants in several programming languages, see Programming grants (https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html) +// . The GranteePrincipal field in the ListGrants response usually contains the +// user or role designated as the grantee principal in the grant. However, when the // grantee principal in the grant is an Amazon Web Services service, the -// GranteePrincipal field contains the service principal -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#principal-services), -// which might represent several different grantee principals. Cross-account use: +// GranteePrincipal field contains the service principal (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#principal-services) +// , which might represent several different grantee principals. Cross-account use: // Yes. To perform this operation on a KMS key in a different Amazon Web Services // account, specify the key ARN in the value of the KeyId parameter. Required -// permissions: kms:ListGrants -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// permissions: kms:ListGrants (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (key policy) Related operations: -// -// * CreateGrant -// -// * ListRetirableGrants -// -// * -// RetireGrant -// -// * RevokeGrant +// - CreateGrant +// - ListRetirableGrants +// - RetireGrant +// - RevokeGrant func (c *Client) ListGrants(ctx context.Context, params *ListGrantsInput, optFns ...func(*Options)) (*ListGrantsOutput, error) { if params == nil { params = &ListGrantsInput{} @@ -60,15 +51,10 @@ type ListGrantsInput struct { // Returns only grants for the specified KMS key. This parameter is required. 
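The Marker, NextMarker, and Truncated wording in the ListAliases hunks above describes the manual pagination contract. A sketch of that loop, with placeholder limits and assuming the usual generated output fields:

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// printAliasesForKey pages through ListAliases by hand, restricted to the
// aliases of a single KMS key.
func printAliasesForKey(ctx context.Context, client *kms.Client, keyID string) error {
	in := &kms.ListAliasesInput{
		KeyId: aws.String(keyID),
		Limit: aws.Int32(50),
	}
	for {
		out, err := client.ListAliases(ctx, in)
		if err != nil {
			return err
		}
		for _, a := range out.Aliases {
			fmt.Println(aws.ToString(a.AliasName))
		}
		if !out.Truncated {
			return nil
		}
		// Feed NextMarker back in unchanged; the docs warn not to construct it.
		in.Marker = out.NextMarker
	}
}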
// Specify the key ID or key ARN of the KMS key. To specify a KMS key in a // different Amazon Web Services account, you must use the key ARN. For example: - // - // * - // Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: - // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To - // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey . // // This member is required. KeyId *string @@ -89,8 +75,8 @@ type ListGrantsInput struct { Limit *int32 // Use this parameter in a subsequent request after you receive a response with - // truncated results. Set it to the value of NextMarker from the truncated response - // you just received. + // truncated results. Set it to the value of NextMarker from the truncated + // response you just received. Marker *string noSmithyDocumentSerde @@ -168,6 +154,9 @@ func (c *Client) addOperationListGrantsMiddlewares(stack *middleware.Stack, opti if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListGrants(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyPolicies.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyPolicies.go index a6c31313..0c879bc1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyPolicies.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyPolicies.go @@ -13,15 +13,12 @@ import ( // Gets the names of the key policies that are attached to a KMS key. This // operation is designed to get policy names that you can use in a GetKeyPolicy -// operation. However, the only valid policy name is default. Cross-account use: +// operation. However, the only valid policy name is default . Cross-account use: // No. You cannot perform this operation on a KMS key in a different Amazon Web -// Services account. Required permissions: kms:ListKeyPolicies -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// Services account. Required permissions: kms:ListKeyPolicies (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (key policy) Related operations: -// -// * GetKeyPolicy -// -// * PutKeyPolicy +// - GetKeyPolicy +// - PutKeyPolicy func (c *Client) ListKeyPolicies(ctx context.Context, params *ListKeyPoliciesInput, optFns ...func(*Options)) (*ListKeyPoliciesOutput, error) { if params == nil { params = &ListKeyPoliciesInput{} @@ -41,15 +38,10 @@ type ListKeyPoliciesInput struct { // Gets the names of key policies for the specified KMS key. Specify the key ID or // key ARN of the KMS key. For example: - // - // * Key ID: - // 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: - // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To - // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey . 
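The ListGrants documentation above mentions filtering by grant ID or grantee principal. A short sketch of the grantee-principal filter follows; the GranteePrincipal input field name is my reading of the generated input shape rather than something shown in these hunks, so treat it as an assumption.

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// listGrantsForPrincipal lists the grants on one KMS key for a single grantee
// principal. Cross-account lookups require the key ARN, as noted above.
func listGrantsForPrincipal(ctx context.Context, client *kms.Client, keyARN, principalARN string) error {
	out, err := client.ListGrants(ctx, &kms.ListGrantsInput{
		KeyId:            aws.String(keyARN),
		GranteePrincipal: aws.String(principalARN),
	})
	if err != nil {
		return err
	}
	for _, g := range out.Grants {
		fmt.Println(aws.ToString(g.GrantId), g.Operations)
	}
	return nil
}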
// // This member is required. KeyId *string @@ -62,8 +54,8 @@ type ListKeyPoliciesInput struct { Limit *int32 // Use this parameter in a subsequent request after you receive a response with - // truncated results. Set it to the value of NextMarker from the truncated response - // you just received. + // truncated results. Set it to the value of NextMarker from the truncated + // response you just received. Marker *string noSmithyDocumentSerde @@ -75,7 +67,7 @@ type ListKeyPoliciesOutput struct { // for the Marker parameter in a subsequent request. NextMarker *string - // A list of key policy names. The only valid value is default. + // A list of key policy names. The only valid value is default . PolicyNames []string // A flag that indicates whether there are more items in the list. When this value @@ -141,6 +133,9 @@ func (c *Client) addOperationListKeyPoliciesMiddlewares(stack *middleware.Stack, if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListKeyPolicies(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeys.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeys.go index c554f536..4a38d969 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeys.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeys.go @@ -14,18 +14,12 @@ import ( // Gets a list of all KMS keys in the caller's Amazon Web Services account and // Region. Cross-account use: No. You cannot perform this operation on a KMS key in -// a different Amazon Web Services account. Required permissions: kms:ListKeys -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// a different Amazon Web Services account. Required permissions: kms:ListKeys (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (IAM policy) Related operations: -// -// * CreateKey -// -// * DescribeKey -// -// * ListAliases -// -// * -// ListResourceTags +// - CreateKey +// - DescribeKey +// - ListAliases +// - ListResourceTags func (c *Client) ListKeys(ctx context.Context, params *ListKeysInput, optFns ...func(*Options)) (*ListKeysOutput, error) { if params == nil { params = &ListKeysInput{} @@ -51,8 +45,8 @@ type ListKeysInput struct { Limit *int32 // Use this parameter in a subsequent request after you receive a response with - // truncated results. Set it to the value of NextMarker from the truncated response - // you just received. + // truncated results. Set it to the value of NextMarker from the truncated + // response you just received. 
Marker *string noSmithyDocumentSerde @@ -127,6 +121,9 @@ func (c *Client) addOperationListKeysMiddlewares(stack *middleware.Stack, option if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListKeys(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListResourceTags.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListResourceTags.go index 02c74956..e0cce01c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListResourceTags.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListResourceTags.go @@ -13,25 +13,17 @@ import ( ) // Returns all tags on the specified KMS key. For general information about tags, -// including the format and syntax, see Tagging Amazon Web Services resources -// (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the Amazon -// Web Services General Reference. For information about using tags in KMS, see -// Tagging keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html). -// Cross-account use: No. You cannot perform this operation on a KMS key in a +// including the format and syntax, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) +// in the Amazon Web Services General Reference. For information about using tags +// in KMS, see Tagging keys (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html) +// . Cross-account use: No. You cannot perform this operation on a KMS key in a // different Amazon Web Services account. Required permissions: -// kms:ListResourceTags -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// kms:ListResourceTags (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (key policy) Related operations: -// -// * CreateKey -// -// * ReplicateKey -// -// * TagResource -// -// * -// UntagResource +// - CreateKey +// - ReplicateKey +// - TagResource +// - UntagResource func (c *Client) ListResourceTags(ctx context.Context, params *ListResourceTagsInput, optFns ...func(*Options)) (*ListResourceTagsOutput, error) { if params == nil { params = &ListResourceTagsInput{} @@ -51,14 +43,10 @@ type ListResourceTagsInput struct { // Gets tags on the specified KMS key. Specify the key ID or key ARN of the KMS // key. For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: - // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To - // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey . // // This member is required. KeyId *string @@ -71,9 +59,9 @@ type ListResourceTagsInput struct { Limit *int32 // Use this parameter in a subsequent request after you receive a response with - // truncated results. Set it to the value of NextMarker from the truncated response - // you just received. Do not attempt to construct this value. Use only the value of - // NextMarker from the truncated response you just received. + // truncated results. 
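Instead of the manual Marker loop shown earlier, the ListKeys operation above can be driven through its generated paginator (the same codegen pattern that produces the ListRetirableGrantsAPIClient interface later in this diff). A sketch, assuming the standard paginator surface:

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// printAllKeyARNs walks every KMS key in the account and Region using the
// generated paginator rather than handling Marker and Truncated by hand.
func printAllKeyARNs(ctx context.Context, client *kms.Client) error {
	p := kms.NewListKeysPaginator(client, &kms.ListKeysInput{Limit: aws.Int32(100)})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, k := range page.Keys {
			fmt.Println(aws.ToString(k.KeyArn))
		}
	}
	return nil
}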
Set it to the value of NextMarker from the truncated + // response you just received. Do not attempt to construct this value. Use only the + // value of NextMarker from the truncated response you just received. Marker *string noSmithyDocumentSerde @@ -88,9 +76,8 @@ type ListResourceTagsOutput struct { // A list of tags. Each tag consists of a tag key and a tag value. Tagging or // untagging a KMS key can allow or deny permission to the KMS key. For details, - // see ABAC for KMS - // (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) in the Key - // Management Service Developer Guide. + // see ABAC for KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) + // in the Key Management Service Developer Guide. Tags []types.Tag // A flag that indicates whether there are more items in the list. When this value @@ -156,6 +143,9 @@ func (c *Client) addOperationListResourceTagsMiddlewares(stack *middleware.Stack if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListResourceTags(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListRetirableGrants.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListRetirableGrants.go index d15ead72..a6a29000 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListRetirableGrants.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListRetirableGrants.go @@ -17,28 +17,20 @@ import ( // in your Amazon Web Services account. The grants that are returned include grants // for KMS keys in your Amazon Web Services account and other Amazon Web Services // accounts. You might use this operation to determine which grants you may retire. -// To retire a grant, use the RetireGrant operation. For detailed information about -// grants, including grant terminology, see Grants in KMS -// (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html) in the Key -// Management Service Developer Guide . For examples of working with grants in -// several programming languages, see Programming grants -// (https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html). -// Cross-account use: You must specify a principal in your Amazon Web Services +// To retire a grant, use the RetireGrant operation. For detailed information +// about grants, including grant terminology, see Grants in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html) +// in the Key Management Service Developer Guide . For examples of working with +// grants in several programming languages, see Programming grants (https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html) +// . Cross-account use: You must specify a principal in your Amazon Web Services // account. However, this operation can return grants in any Amazon Web Services // account. You do not need kms:ListRetirableGrants permission (or any other // additional permission) in any Amazon Web Services account other than your own. -// Required permissions: kms:ListRetirableGrants -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// Required permissions: kms:ListRetirableGrants (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (IAM policy) in your Amazon Web Services account. 
Related operations: -// -// * -// CreateGrant -// -// * ListGrants -// -// * RetireGrant -// -// * RevokeGrant +// - CreateGrant +// - ListGrants +// - RetireGrant +// - RevokeGrant func (c *Client) ListRetirableGrants(ctx context.Context, params *ListRetirableGrantsInput, optFns ...func(*Options)) (*ListRetirableGrantsOutput, error) { if params == nil { params = &ListRetirableGrantsInput{} @@ -58,14 +50,11 @@ type ListRetirableGrantsInput struct { // The retiring principal for which to list grants. Enter a principal in your // Amazon Web Services account. To specify the retiring principal, use the Amazon - // Resource Name (ARN) - // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) of - // an Amazon Web Services principal. Valid Amazon Web Services principals include - // Amazon Web Services accounts (root), IAM users, federated users, and assumed - // role users. For examples of the ARN syntax for specifying a principal, see - // Amazon Web Services Identity and Access Management (IAM) - // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam) - // in the Example ARNs section of the Amazon Web Services General Reference. + // Resource Name (ARN) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // of an Amazon Web Services principal. Valid principals include Amazon Web + // Services accounts, IAM users, IAM roles, federated users, and assumed role + // users. For help with the ARN syntax for a principal, see IAM ARNs (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-arns) + // in the Identity and Access Management User Guide . // // This member is required. RetiringPrincipal *string @@ -78,8 +67,8 @@ type ListRetirableGrantsInput struct { Limit *int32 // Use this parameter in a subsequent request after you receive a response with - // truncated results. Set it to the value of NextMarker from the truncated response - // you just received. + // truncated results. Set it to the value of NextMarker from the truncated + // response you just received. Marker *string noSmithyDocumentSerde @@ -157,6 +146,9 @@ func (c *Client) addOperationListRetirableGrantsMiddlewares(stack *middleware.St if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListRetirableGrants(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } @@ -169,8 +161,8 @@ func (c *Client) addOperationListRetirableGrantsMiddlewares(stack *middleware.St return nil } -// ListRetirableGrantsAPIClient is a client that implements the ListRetirableGrants -// operation. +// ListRetirableGrantsAPIClient is a client that implements the +// ListRetirableGrants operation. type ListRetirableGrantsAPIClient interface { ListRetirableGrants(context.Context, *ListRetirableGrantsInput, ...func(*Options)) (*ListRetirableGrantsOutput, error) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_PutKeyPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_PutKeyPolicy.go index 9a785b2a..74825e2a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_PutKeyPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_PutKeyPolicy.go @@ -11,18 +11,14 @@ import ( ) // Attaches a key policy to the specified KMS key. 
For more information about key -// policies, see Key Policies -// (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) in the -// Key Management Service Developer Guide. For help writing and formatting a JSON -// policy document, see the IAM JSON Policy Reference -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html) in -// the Identity and Access Management User Guide . For examples of adding a key -// policy in multiple programming languages, see Setting a key policy -// (https://docs.aws.amazon.com/kms/latest/developerguide/programming-key-policies.html#put-policy) +// policies, see Key Policies (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) +// in the Key Management Service Developer Guide. For help writing and formatting a +// JSON policy document, see the IAM JSON Policy Reference (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html) +// in the Identity and Access Management User Guide . For examples of adding a key +// policy in multiple programming languages, see Setting a key policy (https://docs.aws.amazon.com/kms/latest/developerguide/programming-key-policies.html#put-policy) // in the Key Management Service Developer Guide. Cross-account use: No. You cannot // perform this operation on a KMS key in a different Amazon Web Services account. -// Required permissions: kms:PutKeyPolicy -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// Required permissions: kms:PutKeyPolicy (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (key policy) Related operations: GetKeyPolicy func (c *Client) PutKeyPolicy(ctx context.Context, params *PutKeyPolicyInput, optFns ...func(*Options)) (*PutKeyPolicyOutput, error) { if params == nil { @@ -43,77 +39,55 @@ type PutKeyPolicyInput struct { // Sets the key policy on the specified KMS key. Specify the key ID or key ARN of // the KMS key. For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key - // ARN: - // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To - // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey . // // This member is required. KeyId *string // The key policy to attach to the KMS key. The key policy must meet the following // criteria: - // - // * If you don't set BypassPolicyLockoutSafetyCheck to true, the key - // policy must allow the principal that is making the PutKeyPolicy request to make - // a subsequent PutKeyPolicy request on the KMS key. This reduces the risk that the - // KMS key becomes unmanageable. For more information, refer to the scenario in the - // Default Key Policy - // (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) - // section of the Key Management Service Developer Guide. - // - // * Each statement in the - // key policy must contain one or more principals. The principals in the key policy - // must exist and be visible to KMS. 
When you create a new Amazon Web Services - // principal (for example, an IAM user or role), you might need to enforce a delay - // before including the new principal in a key policy because the new principal - // might not be immediately visible to KMS. For more information, see Changes that - // I make are not always immediately visible - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) - // in the Amazon Web Services Identity and Access Management User Guide. - // - // A key - // policy document can include only the following characters: - // - // * Printable ASCII - // characters from the space character (\u0020) through the end of the ASCII - // character range. - // - // * Printable characters in the Basic Latin and Latin-1 - // Supplement character set (through \u00FF). - // - // * The tab (\u0009), line feed - // (\u000A), and carriage return (\u000D) special characters - // - // For information about - // key policies, see Key policies in KMS - // (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) in the - // Key Management Service Developer Guide.For help writing and formatting a JSON - // policy document, see the IAM JSON Policy Reference - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html) in - // the Identity and Access Management User Guide . + // - The key policy must allow the calling principal to make a subsequent + // PutKeyPolicy request on the KMS key. This reduces the risk that the KMS key + // becomes unmanageable. For more information, see Default key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key) + // in the Key Management Service Developer Guide. (To omit this condition, set + // BypassPolicyLockoutSafetyCheck to true.) + // - Each statement in the key policy must contain one or more principals. The + // principals in the key policy must exist and be visible to KMS. When you create a + // new Amazon Web Services principal, you might need to enforce a delay before + // including the new principal in a key policy because the new principal might not + // be immediately visible to KMS. For more information, see Changes that I make + // are not always immediately visible (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) + // in the Amazon Web Services Identity and Access Management User Guide. + // A key policy document can include only the following characters: + // - Printable ASCII characters from the space character ( \u0020 ) through the + // end of the ASCII character range. + // - Printable characters in the Basic Latin and Latin-1 Supplement character + // set (through \u00FF ). + // - The tab ( \u0009 ), line feed ( \u000A ), and carriage return ( \u000D ) + // special characters + // For information about key policies, see Key policies in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) + // in the Key Management Service Developer Guide.For help writing and formatting a + // JSON policy document, see the IAM JSON Policy Reference (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html) + // in the Identity and Access Management User Guide . // // This member is required. Policy *string - // The name of the key policy. The only valid value is default. + // The name of the key policy. The only valid value is default . // // This member is required. 
PolicyName *string - // A flag to indicate whether to bypass the key policy lockout safety check. - // Setting this value to true increases the risk that the KMS key becomes + // Skips ("bypasses") the key policy lockout safety check. The default value is + // false. Setting this value to true increases the risk that the KMS key becomes // unmanageable. Do not set this value to true indiscriminately. For more - // information, refer to the scenario in the Default Key Policy - // (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) - // section in the Key Management Service Developer Guide. Use this parameter only - // when you intend to prevent the principal that is making the request from making - // a subsequent PutKeyPolicy request on the KMS key. The default value is false. + // information, see Default key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key) + // in the Key Management Service Developer Guide. Use this parameter only when you + // intend to prevent the principal that is making the request from making a + // subsequent PutKeyPolicy request on the KMS key. BypassPolicyLockoutSafetyCheck bool noSmithyDocumentSerde @@ -177,6 +151,9 @@ func (c *Client) addOperationPutKeyPolicyMiddlewares(stack *middleware.Stack, op if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutKeyPolicy(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReEncrypt.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReEncrypt.go index fe0f4e70..b78e3d3e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReEncrypt.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReEncrypt.go @@ -11,93 +11,72 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Decrypts ciphertext and then reencrypts it entirely within KMS. You can use this -// operation to change the KMS key under which data is encrypted, such as when you -// manually rotate -// (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html#rotate-keys-manually) +// Decrypts ciphertext and then reencrypts it entirely within KMS. You can use +// this operation to change the KMS key under which data is encrypted, such as when +// you manually rotate (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html#rotate-keys-manually) // a KMS key or change the KMS key that protects a ciphertext. You can also use it -// to reencrypt ciphertext under the same KMS key, such as to change the encryption -// context -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) +// to reencrypt ciphertext under the same KMS key, such as to change the +// encryption context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) // of a ciphertext. The ReEncrypt operation can decrypt ciphertext that was // encrypted by using a KMS key in an KMS operation, such as Encrypt or -// GenerateDataKey. It can also decrypt ciphertext that was encrypted by using the -// public key of an asymmetric KMS key -// (https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html#asymmetric-cmks) +// GenerateDataKey . 
It can also decrypt ciphertext that was encrypted by using the +// public key of an asymmetric KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html#asymmetric-cmks) // outside of KMS. However, it cannot decrypt ciphertext produced by other -// libraries, such as the Amazon Web Services Encryption SDK -// (https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/) or Amazon -// S3 client-side encryption -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html). -// These libraries return a ciphertext format that is incompatible with KMS. When -// you use the ReEncrypt operation, you need to provide information for the decrypt -// operation and the subsequent encrypt operation. +// libraries, such as the Amazon Web Services Encryption SDK (https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/) +// or Amazon S3 client-side encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html) +// . These libraries return a ciphertext format that is incompatible with KMS. When +// you use the ReEncrypt operation, you need to provide information for the +// decrypt operation and the subsequent encrypt operation. +// - If your ciphertext was encrypted under an asymmetric KMS key, you must use +// the SourceKeyId parameter to identify the KMS key that encrypted the +// ciphertext. You must also supply the encryption algorithm that was used. This +// information is required to decrypt the data. +// - If your ciphertext was encrypted under a symmetric encryption KMS key, the +// SourceKeyId parameter is optional. KMS can get this information from metadata +// that it adds to the symmetric ciphertext blob. This feature adds durability to +// your implementation by ensuring that authorized users can decrypt ciphertext +// decades after it was encrypted, even if they've lost track of the key ID. +// However, specifying the source KMS key is always recommended as a best practice. +// When you use the SourceKeyId parameter to specify a KMS key, KMS uses only the +// KMS key you specify. If the ciphertext was encrypted under a different KMS key, +// the ReEncrypt operation fails. This practice ensures that you use the KMS key +// that you intend. +// - To reencrypt the data, you must use the DestinationKeyId parameter to +// specify the KMS key that re-encrypts the data after it is decrypted. If the +// destination KMS key is an asymmetric KMS key, you must also provide the +// encryption algorithm. The algorithm that you choose must be compatible with the +// KMS key. When you use an asymmetric KMS key to encrypt or reencrypt data, be +// sure to record the KMS key and encryption algorithm that you choose. You will be +// required to provide the same KMS key and encryption algorithm when you decrypt +// the data. If the KMS key and algorithm do not match the values used to encrypt +// the data, the decrypt operation fails. You are not required to supply the key ID +// and encryption algorithm when you decrypt with symmetric encryption KMS keys +// because KMS stores this information in the ciphertext blob. KMS cannot store +// metadata in ciphertext generated with asymmetric keys. The standard format for +// asymmetric key ciphertext does not include configurable fields. // -// * If your ciphertext was -// encrypted under an asymmetric KMS key, you must use the SourceKeyId parameter to -// identify the KMS key that encrypted the ciphertext. 
You must also supply the -// encryption algorithm that was used. This information is required to decrypt the -// data. +// The KMS key that you use for this operation must be in a compatible key state. +// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) +// in the Key Management Service Developer Guide. Cross-account use: Yes. The +// source KMS key and destination KMS key can be in different Amazon Web Services +// accounts. Either or both KMS keys can be in a different account than the caller. +// To specify a KMS key in a different account, you must use its key ARN or alias +// ARN. Required permissions: +// - kms:ReEncryptFrom (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// permission on the source KMS key (key policy) +// - kms:ReEncryptTo (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// permission on the destination KMS key (key policy) // -// * If your ciphertext was encrypted under a symmetric encryption KMS key, -// the SourceKeyId parameter is optional. KMS can get this information from -// metadata that it adds to the symmetric ciphertext blob. This feature adds -// durability to your implementation by ensuring that authorized users can decrypt -// ciphertext decades after it was encrypted, even if they've lost track of the key -// ID. However, specifying the source KMS key is always recommended as a best -// practice. When you use the SourceKeyId parameter to specify a KMS key, KMS uses -// only the KMS key you specify. If the ciphertext was encrypted under a different -// KMS key, the ReEncrypt operation fails. This practice ensures that you use the -// KMS key that you intend. -// -// * To reencrypt the data, you must use the -// DestinationKeyId parameter to specify the KMS key that re-encrypts the data -// after it is decrypted. If the destination KMS key is an asymmetric KMS key, you -// must also provide the encryption algorithm. The algorithm that you choose must -// be compatible with the KMS key. When you use an asymmetric KMS key to encrypt or -// reencrypt data, be sure to record the KMS key and encryption algorithm that you -// choose. You will be required to provide the same KMS key and encryption -// algorithm when you decrypt the data. If the KMS key and algorithm do not match -// the values used to encrypt the data, the decrypt operation fails. You are not -// required to supply the key ID and encryption algorithm when you decrypt with -// symmetric encryption KMS keys because KMS stores this information in the -// ciphertext blob. KMS cannot store metadata in ciphertext generated with -// asymmetric keys. The standard format for asymmetric key ciphertext does not -// include configurable fields. -// -// The KMS key that you use for this operation must -// be in a compatible key state. For details, see Key states of KMS keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the -// Key Management Service Developer Guide. Cross-account use: Yes. The source KMS -// key and destination KMS key can be in different Amazon Web Services accounts. -// Either or both KMS keys can be in a different account than the caller. To -// specify a KMS key in a different account, you must use its key ARN or alias ARN. 
-// Required permissions: -// -// * kms:ReEncryptFrom -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// permission on the source KMS key (key policy) -// -// * kms:ReEncryptTo -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// permission on the destination KMS key (key policy) -// -// To permit reencryption from -// or to a KMS key, include the "kms:ReEncrypt*" permission in your key policy -// (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html). This -// permission is automatically included in the key policy when you use the console -// to create a KMS key. But you must include it manually when you create a KMS key -// programmatically or when you use the PutKeyPolicy operation to set a key policy. -// Related operations: -// -// * Decrypt -// -// * Encrypt -// -// * GenerateDataKey -// -// * -// GenerateDataKeyPair +// To permit reencryption from or to a KMS key, include the "kms:ReEncrypt*" +// permission in your key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) +// . This permission is automatically included in the key policy when you use the +// console to create a KMS key. But you must include it manually when you create a +// KMS key programmatically or when you use the PutKeyPolicy operation to set a +// key policy. Related operations: +// - Decrypt +// - Encrypt +// - GenerateDataKey +// - GenerateDataKeyPair func (c *Client) ReEncrypt(ctx context.Context, params *ReEncryptInput, optFns ...func(*Options)) (*ReEncryptOutput, error) { if params == nil { params = &ReEncryptInput{} @@ -121,63 +100,54 @@ type ReEncryptInput struct { CiphertextBlob []byte // A unique identifier for the KMS key that is used to reencrypt the data. Specify - // a symmetric encryption KMS key or an asymmetric KMS key with a KeyUsage value of - // ENCRYPT_DECRYPT. To find the KeyUsage value of a KMS key, use the DescribeKey - // operation. To specify a KMS key, use its key ID, key ARN, alias name, or alias - // ARN. When using an alias name, prefix it with "alias/". To specify a KMS key in - // a different Amazon Web Services account, you must use the key ARN or alias ARN. - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: - // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // * - // Alias name: alias/ExampleAlias - // - // * Alias ARN: - // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias - // - // To get the key ID and key - // ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias - // ARN, use ListAliases. + // a symmetric encryption KMS key or an asymmetric KMS key with a KeyUsage value + // of ENCRYPT_DECRYPT . To find the KeyUsage value of a KMS key, use the + // DescribeKey operation. To specify a KMS key, use its key ID, key ARN, alias + // name, or alias ARN. When using an alias name, prefix it with "alias/" . To + // specify a KMS key in a different Amazon Web Services account, you must use the + // key ARN or alias ARN. For example: + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // - Alias name: alias/ExampleAlias + // - Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey . To + // get the alias name and alias ARN, use ListAliases . 
// // This member is required. DestinationKeyId *string // Specifies the encryption algorithm that KMS will use to reecrypt the data after - // it has decrypted it. The default value, SYMMETRIC_DEFAULT, represents the + // it has decrypted it. The default value, SYMMETRIC_DEFAULT , represents the // encryption algorithm used for symmetric encryption KMS keys. This parameter is // required only when the destination KMS key is an asymmetric KMS key. DestinationEncryptionAlgorithm types.EncryptionAlgorithmSpec - // Specifies that encryption context to use when the reencrypting the data. A - // destination encryption context is valid only when the destination KMS key is a - // symmetric encryption KMS key. The standard ciphertext format for asymmetric KMS - // keys does not include fields for metadata. An encryption context is a collection - // of non-secret key-value pairs that represent additional authenticated data. When + // Specifies that encryption context to use when the reencrypting the data. Do not + // include confidential or sensitive information in this field. This field may be + // displayed in plaintext in CloudTrail logs and other output. A destination + // encryption context is valid only when the destination KMS key is a symmetric + // encryption KMS key. The standard ciphertext format for asymmetric KMS keys does + // not include fields for metadata. An encryption context is a collection of + // non-secret key-value pairs that represent additional authenticated data. When // you use an encryption context to encrypt data, you must specify the same (an // exact case-sensitive match) encryption context to decrypt the data. An // encryption context is supported only on operations with symmetric encryption KMS // keys. On operations with symmetric encryption KMS keys, an encryption context is // optional, but it is strongly recommended. For more information, see Encryption - // context - // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) + // context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) // in the Key Management Service Developer Guide. DestinationEncryptionContext map[string]string // A list of grant tokens. Use a grant token when your permission to call this // operation comes from a new grant that has not yet achieved eventual consistency. - // For more information, see Grant token - // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Using a grant token - // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) + // For more information, see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) // in the Key Management Service Developer Guide. GrantTokens []string // Specifies the encryption algorithm that KMS will use to decrypt the ciphertext - // before it is reencrypted. The default value, SYMMETRIC_DEFAULT, represents the + // before it is reencrypted. The default value, SYMMETRIC_DEFAULT , represents the // algorithm used for symmetric encryption KMS keys. Specify the same algorithm // that was used to encrypt the ciphertext. If you specify a different algorithm, // the decrypt attempt fails. This parameter is required only when the ciphertext @@ -192,38 +162,28 @@ type ReEncryptInput struct { // the data. 
An encryption context is supported only on operations with symmetric // encryption KMS keys. On operations with symmetric encryption KMS keys, an // encryption context is optional, but it is strongly recommended. For more - // information, see Encryption context - // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) + // information, see Encryption context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) // in the Key Management Service Developer Guide. SourceEncryptionContext map[string]string // Specifies the KMS key that KMS will use to decrypt the ciphertext before it is // re-encrypted. Enter a key ID of the KMS key that was used to encrypt the // ciphertext. If you identify a different KMS key, the ReEncrypt operation throws - // an IncorrectKeyException. This parameter is required only when the ciphertext + // an IncorrectKeyException . This parameter is required only when the ciphertext // was encrypted under an asymmetric KMS key. If you used a symmetric encryption // KMS key, KMS can get the KMS key from metadata that it adds to the symmetric // ciphertext blob. However, it is always recommended as a best practice. This // practice ensures that you use the KMS key that you intend. To specify a KMS key, // use its key ID, key ARN, alias name, or alias ARN. When using an alias name, - // prefix it with "alias/". To specify a KMS key in a different Amazon Web Services - // account, you must use the key ARN or alias ARN. For example: - // - // * Key ID: - // 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: - // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // * - // Alias name: alias/ExampleAlias - // - // * Alias ARN: - // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias - // - // To get the key ID and key - // ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias - // ARN, use ListAliases. + // prefix it with "alias/" . To specify a KMS key in a different Amazon Web + // Services account, you must use the key ARN or alias ARN. For example: + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // - Alias name: alias/ExampleAlias + // - Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey . To + // get the alias name and alias ARN, use ListAliases . SourceKeyId *string noSmithyDocumentSerde @@ -238,9 +198,8 @@ type ReEncryptOutput struct { // The encryption algorithm that was used to reencrypt the data. DestinationEncryptionAlgorithm types.EncryptionAlgorithmSpec - // The Amazon Resource Name (key ARN - // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) - // of the KMS key that was used to reencrypt the data. + // The Amazon Resource Name ( key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN) + // ) of the KMS key that was used to reencrypt the data. 
KeyId *string // The encryption algorithm that was used to decrypt the ciphertext before it was @@ -307,6 +266,9 @@ func (c *Client) addOperationReEncryptMiddlewares(stack *middleware.Stack, optio if err = stack.Initialize.Add(newServiceMetadataMiddleware_opReEncrypt(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReplicateKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReplicateKey.go index b84259f9..305f711c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReplicateKey.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReplicateKey.go @@ -22,45 +22,37 @@ import ( // interchangeably to encrypt data in one Amazon Web Services Region and decrypt it // in a different Amazon Web Services Region without re-encrypting the data or // making a cross-Region call. For more information about multi-Region keys, see -// Multi-Region keys in KMS -// (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html) +// Multi-Region keys in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html) // in the Key Management Service Developer Guide. A replica key is a // fully-functional KMS key that can be used independently of its primary and peer // replica keys. A primary key and its replica keys share properties that make them -// interoperable. They have the same key ID -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-id) -// and key material. They also have the same key spec -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-spec), -// key usage -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-usage), -// key material origin -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-origin), -// and automatic key rotation status -// (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html). KMS -// automatically synchronizes these shared properties among related multi-Region -// keys. All other properties of a replica key can differ, including its key policy -// (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html), tags -// (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html), -// aliases (https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html), -// and Key states of KMS keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html). KMS -// pricing and quotas for KMS keys apply to each primary key and replica key. When -// this operation completes, the new replica key has a transient key state of -// Creating. This key state changes to Enabled (or PendingImport) after a few +// interoperable. They have the same key ID (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-id) +// and key material. They also have the same key spec (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-spec) +// , key usage (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-usage) +// , key material origin (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-origin) +// , and automatic key rotation status (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html) +// . 
KMS automatically synchronizes these shared properties among related +// multi-Region keys. All other properties of a replica key can differ, including +// its key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) +// , tags (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html) +// , aliases (https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html) +// , and Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) +// . KMS pricing and quotas for KMS keys apply to each primary key and replica key. +// When this operation completes, the new replica key has a transient key state of +// Creating . This key state changes to Enabled (or PendingImport ) after a few // seconds when the process of creating the new replica key is complete. While the -// key state is Creating, you can manage key, but you cannot yet use it in +// key state is Creating , you can manage key, but you cannot yet use it in // cryptographic operations. If you are creating and using the replica key -// programmatically, retry on KMSInvalidStateException or call DescribeKey to check -// its KeyState value before using it. For details about the Creating key state, -// see Key states of KMS keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the -// Key Management Service Developer Guide. You cannot create more than one replica -// of a primary key in any Region. If the Region already includes a replica of the -// key you're trying to replicate, ReplicateKey returns an AlreadyExistsException -// error. If the key state of the existing replica is PendingDeletion, you can -// cancel the scheduled key deletion (CancelKeyDeletion) or wait for the key to be -// deleted. The new replica key you create will have the same shared properties -// (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html#mrk-sync-properties) +// programmatically, retry on KMSInvalidStateException or call DescribeKey to +// check its KeyState value before using it. For details about the Creating key +// state, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) +// in the Key Management Service Developer Guide. You cannot create more than one +// replica of a primary key in any Region. If the Region already includes a replica +// of the key you're trying to replicate, ReplicateKey returns an +// AlreadyExistsException error. If the key state of the existing replica is +// PendingDeletion , you can cancel the scheduled key deletion ( CancelKeyDeletion +// ) or wait for the key to be deleted. The new replica key you create will have +// the same shared properties (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html#mrk-sync-properties) // as the original replica key. The CloudTrail log of a ReplicateKey operation // records a ReplicateKey operation in the primary key's Region and a CreateKey // operation in the replica key's Region. If you replicate a multi-Region primary @@ -69,26 +61,19 @@ import ( // For details, see Importing key material into multi-Region keys in the Key // Management Service Developer Guide. To convert a replica key to a primary key, // use the UpdatePrimaryRegion operation. ReplicateKey uses different default -// values for the KeyPolicy and Tags parameters than those used in the KMS console. -// For details, see the parameter descriptions. Cross-account use: No. 
You cannot -// use this operation to create a replica key in a different Amazon Web Services -// account. Required permissions: +// values for the KeyPolicy and Tags parameters than those used in the KMS +// console. For details, see the parameter descriptions. Cross-account use: No. You +// cannot use this operation to create a replica key in a different Amazon Web +// Services account. Required permissions: +// - kms:ReplicateKey on the primary key (in the primary key's Region). Include +// this permission in the primary key's key policy. +// - kms:CreateKey in an IAM policy in the replica Region. +// - To use the Tags parameter, kms:TagResource in an IAM policy in the replica +// Region. // -// * kms:ReplicateKey on the primary key (in the -// primary key's Region). Include this permission in the primary key's key -// policy. -// -// * kms:CreateKey in an IAM policy in the replica Region. -// -// * To use the -// Tags parameter, kms:TagResource in an IAM policy in the replica Region. -// -// Related -// operations -// -// * CreateKey -// -// * UpdatePrimaryRegion +// Related operations +// - CreateKey +// - UpdatePrimaryRegion func (c *Client) ReplicateKey(ctx context.Context, params *ReplicateKeyInput, optFns ...func(*Options)) (*ReplicateKeyOutput, error) { if params == nil { params = &ReplicateKeyInput{} @@ -108,120 +93,94 @@ type ReplicateKeyInput struct { // Identifies the multi-Region primary key that is being replicated. To determine // whether a KMS key is a multi-Region primary key, use the DescribeKey operation - // to check the value of the MultiRegionKeyType property. Specify the key ID or key - // ARN of a multi-Region primary key. For example: - // - // * Key ID: - // mrk-1234abcd12ab34cd56ef1234567890ab - // - // * Key ARN: - // arn:aws:kms:us-east-2:111122223333:key/mrk-1234abcd12ab34cd56ef1234567890ab - // - // To - // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // to check the value of the MultiRegionKeyType property. Specify the key ID or + // key ARN of a multi-Region primary key. For example: + // - Key ID: mrk-1234abcd12ab34cd56ef1234567890ab + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/mrk-1234abcd12ab34cd56ef1234567890ab + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey . // // This member is required. KeyId *string // The Region ID of the Amazon Web Services Region for this replica key. Enter the - // Region ID, such as us-east-1 or ap-southeast-2. For a list of Amazon Web - // Services Regions in which KMS is supported, see KMS service endpoints - // (https://docs.aws.amazon.com/general/latest/gr/kms.html#kms_region) in the - // Amazon Web Services General Reference. HMAC KMS keys are not supported in all - // Amazon Web Services Regions. If you try to replicate an HMAC KMS key in an + // Region ID, such as us-east-1 or ap-southeast-2 . For a list of Amazon Web + // Services Regions in which KMS is supported, see KMS service endpoints (https://docs.aws.amazon.com/general/latest/gr/kms.html#kms_region) + // in the Amazon Web Services General Reference. HMAC KMS keys are not supported in + // all Amazon Web Services Regions. If you try to replicate an HMAC KMS key in an // Amazon Web Services Region in which HMAC keys are not supported, the - // ReplicateKey operation returns an UnsupportedOperationException. 
For a list of - // Regions in which HMAC KMS keys are supported, see HMAC keys in KMS - // (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html) in the Key - // Management Service Developer Guide. The replica must be in a different Amazon - // Web Services Region than its primary key and other replicas of that primary key, - // but in the same Amazon Web Services partition. KMS must be available in the - // replica Region. If the Region is not enabled by default, the Amazon Web Services - // account must be enabled in the Region. For information about Amazon Web Services - // partitions, see Amazon Resource Names (ARNs) - // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) in - // the Amazon Web Services General Reference. For information about enabling and - // disabling Regions, see Enabling a Region - // (https://docs.aws.amazon.com/general/latest/gr/rande-manage.html#rande-manage-enable) - // and Disabling a Region - // (https://docs.aws.amazon.com/general/latest/gr/rande-manage.html#rande-manage-disable) + // ReplicateKey operation returns an UnsupportedOperationException . For a list of + // Regions in which HMAC KMS keys are supported, see HMAC keys in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html) + // in the Key Management Service Developer Guide. The replica must be in a + // different Amazon Web Services Region than its primary key and other replicas of + // that primary key, but in the same Amazon Web Services partition. KMS must be + // available in the replica Region. If the Region is not enabled by default, the + // Amazon Web Services account must be enabled in the Region. For information about + // Amazon Web Services partitions, see Amazon Resource Names (ARNs) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. For information about enabling and + // disabling Regions, see Enabling a Region (https://docs.aws.amazon.com/general/latest/gr/rande-manage.html#rande-manage-enable) + // and Disabling a Region (https://docs.aws.amazon.com/general/latest/gr/rande-manage.html#rande-manage-disable) // in the Amazon Web Services General Reference. // // This member is required. ReplicaRegion *string - // A flag to indicate whether to bypass the key policy lockout safety check. - // Setting this value to true increases the risk that the KMS key becomes + // Skips ("bypasses") the key policy lockout safety check. The default value is + // false. Setting this value to true increases the risk that the KMS key becomes // unmanageable. Do not set this value to true indiscriminately. For more - // information, refer to the scenario in the Default Key Policy - // (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) - // section in the Key Management Service Developer Guide. Use this parameter only - // when you intend to prevent the principal that is making the request from making - // a subsequent PutKeyPolicy request on the KMS key. The default value is false. + // information, see Default key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key) + // in the Key Management Service Developer Guide. Use this parameter only when you + // intend to prevent the principal that is making the request from making a + // subsequent PutKeyPolicy request on the KMS key. BypassPolicyLockoutSafetyCheck bool // A description of the KMS key. 
The default value is an empty string (no - // description). The description is not a shared property of multi-Region keys. You - // can specify the same description or a different description for each key in a - // set of related multi-Region keys. KMS does not synchronize this property. + // description). Do not include confidential or sensitive information in this + // field. This field may be displayed in plaintext in CloudTrail logs and other + // output. The description is not a shared property of multi-Region keys. You can + // specify the same description or a different description for each key in a set of + // related multi-Region keys. KMS does not synchronize this property. Description *string // The key policy to attach to the KMS key. This parameter is optional. If you do - // not provide a key policy, KMS attaches the default key policy - // (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default) + // not provide a key policy, KMS attaches the default key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default) // to the KMS key. The key policy is not a shared property of multi-Region keys. // You can specify the same key policy or a different key policy for each key in a // set of related multi-Region keys. KMS does not synchronize this property. If you // provide a key policy, it must meet the following criteria: - // - // * If you don't set - // BypassPolicyLockoutSafetyCheck to true, the key policy must give the caller - // kms:PutKeyPolicy permission on the replica key. This reduces the risk that the - // KMS key becomes unmanageable. For more information, refer to the scenario in the - // Default Key Policy - // (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) - // section of the Key Management Service Developer Guide . - // - // * Each statement in the - // key policy must contain one or more principals. The principals in the key policy - // must exist and be visible to KMS. When you create a new Amazon Web Services - // principal (for example, an IAM user or role), you might need to enforce a delay - // before including the new principal in a key policy because the new principal - // might not be immediately visible to KMS. For more information, see Changes that - // I make are not always immediately visible - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) + // - The key policy must allow the calling principal to make a subsequent + // PutKeyPolicy request on the KMS key. This reduces the risk that the KMS key + // becomes unmanageable. For more information, see Default key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key) + // in the Key Management Service Developer Guide. (To omit this condition, set + // BypassPolicyLockoutSafetyCheck to true.) + // - Each statement in the key policy must contain one or more principals. The + // principals in the key policy must exist and be visible to KMS. When you create a + // new Amazon Web Services principal, you might need to enforce a delay before + // including the new principal in a key policy because the new principal might not + // be immediately visible to KMS. 
For more information, see Changes that I make + // are not always immediately visible (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) + // in the Amazon Web Services Identity and Access Management User Guide. + // A key policy document can include only the following characters: + // - Printable ASCII characters from the space character ( \u0020 ) through the + // end of the ASCII character range. + // - Printable characters in the Basic Latin and Latin-1 Supplement character + // set (through \u00FF ). + // - The tab ( \u0009 ), line feed ( \u000A ), and carriage return ( \u000D ) + // special characters + // For information about key policies, see Key policies in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) + // in the Key Management Service Developer Guide. For help writing and formatting a + // JSON policy document, see the IAM JSON Policy Reference (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html) // in the Identity and Access Management User Guide . - // - // A key policy document can - // include only the following characters: - // - // * Printable ASCII characters from the - // space character (\u0020) through the end of the ASCII character range. - // - // * - // Printable characters in the Basic Latin and Latin-1 Supplement character set - // (through \u00FF). - // - // * The tab (\u0009), line feed (\u000A), and carriage return - // (\u000D) special characters - // - // For information about key policies, see Key - // policies in KMS - // (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) in the - // Key Management Service Developer Guide. For help writing and formatting a JSON - // policy document, see the IAM JSON Policy Reference - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html) in - // the Identity and Access Management User Guide . Policy *string // Assigns one or more tags to the replica key. Use this parameter to tag the KMS // key when it is created. To tag an existing KMS key, use the TagResource - // operation. Tagging or untagging a KMS key can allow or deny permission to the - // KMS key. For details, see ABAC for KMS - // (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) in the Key - // Management Service Developer Guide. To use this parameter, you must have - // kms:TagResource - // (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) + // operation. Do not include confidential or sensitive information in this field. + // This field may be displayed in plaintext in CloudTrail logs and other output. + // Tagging or untagging a KMS key can allow or deny permission to the KMS key. For + // details, see ABAC for KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) + // in the Key Management Service Developer Guide. To use this parameter, you must + // have kms:TagResource (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // permission in an IAM policy. Tags are not a shared property of multi-Region // keys. You can specify the same tags or different tags for each key in a set of // related multi-Region keys. KMS does not synchronize this property. Each tag @@ -232,8 +191,8 @@ type ReplicateKeyInput struct { // specified one. When you add tags to an Amazon Web Services resource, Amazon Web // Services generates a cost allocation report with usage and costs aggregated by // tags. 
Tags can also be used to control access to a KMS key. For details, see - // Tagging Keys - // (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html). + // Tagging Keys (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html) + // . Tags []types.Tag noSmithyDocumentSerde @@ -241,13 +200,11 @@ type ReplicateKeyInput struct { type ReplicateKeyOutput struct { - // Displays details about the new replica key, including its Amazon Resource Name - // (key ARN - // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) - // and Key states of KMS keys - // (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html). It also - // includes the ARN and Amazon Web Services Region of its primary key and other - // replica keys. + // Displays details about the new replica key, including its Amazon Resource Name ( + // key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN) + // ) and Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) + // . It also includes the ARN and Amazon Web Services Region of its primary key and + // other replica keys. ReplicaKeyMetadata *types.KeyMetadata // The key policy of the new replica key. The value is a key policy document in @@ -315,6 +272,9 @@ func (c *Client) addOperationReplicateKeyMiddlewares(stack *middleware.Stack, op if err = stack.Initialize.Add(newServiceMetadataMiddleware_opReplicateKey(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RetireGrant.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RetireGrant.go index adb4876b..85e676e9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RetireGrant.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RetireGrant.go @@ -11,36 +11,26 @@ import ( ) // Deletes a grant. Typically, you retire a grant when you no longer need its -// permissions. To identify the grant to retire, use a grant token -// (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token), -// or both the grant ID and a key identifier (key ID or key ARN) of the KMS key. +// permissions. To identify the grant to retire, use a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) +// , or both the grant ID and a key identifier (key ID or key ARN) of the KMS key. // The CreateGrant operation returns both values. This operation can be called by // the retiring principal for a grant, by the grantee principal if the grant allows // the RetireGrant operation, and by the Amazon Web Services account in which the // grant is created. It can also be called by principals to whom permission for -// retiring a grant is delegated. For details, see Retiring and revoking grants -// (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#grant-delete) +// retiring a grant is delegated. For details, see Retiring and revoking grants (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#grant-delete) // in the Key Management Service Developer Guide. 
For detailed information about -// grants, including grant terminology, see Grants in KMS -// (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html) in the Key -// Management Service Developer Guide . For examples of working with grants in -// several programming languages, see Programming grants -// (https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html). -// Cross-account use: Yes. You can retire a grant on a KMS key in a different +// grants, including grant terminology, see Grants in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html) +// in the Key Management Service Developer Guide . For examples of working with +// grants in several programming languages, see Programming grants (https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html) +// . Cross-account use: Yes. You can retire a grant on a KMS key in a different // Amazon Web Services account. Required permissions::Permission to retire a grant // is determined primarily by the grant. For details, see Retiring and revoking -// grants -// (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#grant-delete) +// grants (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#grant-delete) // in the Key Management Service Developer Guide. Related operations: -// -// * -// CreateGrant -// -// * ListGrants -// -// * ListRetirableGrants -// -// * RevokeGrant +// - CreateGrant +// - ListGrants +// - ListRetirableGrants +// - RevokeGrant func (c *Client) RetireGrant(ctx context.Context, params *RetireGrantInput, optFns ...func(*Options)) (*RetireGrantOutput, error) { if params == nil { params = &RetireGrantInput{} @@ -58,19 +48,16 @@ func (c *Client) RetireGrant(ctx context.Context, params *RetireGrantInput, optF type RetireGrantInput struct { - // Identifies the grant to retire. To get the grant ID, use CreateGrant, - // ListGrants, or ListRetirableGrants. - // - // * Grant ID Example - - // 0123456789012345678901234567890123456789012345678901234567890123 + // Identifies the grant to retire. To get the grant ID, use CreateGrant , + // ListGrants , or ListRetirableGrants . + // - Grant ID Example - + // 0123456789012345678901234567890123456789012345678901234567890123 GrantId *string // Identifies the grant to be retired. You can use a grant token to identify a new // grant even before it has achieved eventual consistency. Only the CreateGrant - // operation returns a grant token. For details, see Grant token - // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Eventual consistency - // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-eventual-consistency) + // operation returns a grant token. For details, see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + // and Eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-eventual-consistency) // in the Key Management Service Developer Guide. 
GrantToken *string @@ -137,6 +124,9 @@ func (c *Client) addOperationRetireGrantMiddlewares(stack *middleware.Stack, opt if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRetireGrant(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RevokeGrant.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RevokeGrant.go index 75676053..5750b216 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RevokeGrant.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RevokeGrant.go @@ -11,33 +11,23 @@ import ( ) // Deletes the specified grant. You revoke a grant to terminate the permissions -// that the grant allows. For more information, see Retiring and revoking grants -// (https://docs.aws.amazon.com/kms/latest/developerguide/managing-grants.html#grant-delete) +// that the grant allows. For more information, see Retiring and revoking grants (https://docs.aws.amazon.com/kms/latest/developerguide/managing-grants.html#grant-delete) // in the Key Management Service Developer Guide . When you create, retire, or // revoke a grant, there might be a brief delay, usually less than five minutes, // until the grant is available throughout KMS. This state is known as eventual -// consistency. For details, see Eventual consistency -// (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-eventual-consistency) +// consistency. For details, see Eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-eventual-consistency) // in the Key Management Service Developer Guide . For detailed information about -// grants, including grant terminology, see Grants in KMS -// (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html) in the Key -// Management Service Developer Guide . For examples of working with grants in -// several programming languages, see Programming grants -// (https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html). -// Cross-account use: Yes. To perform this operation on a KMS key in a different +// grants, including grant terminology, see Grants in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html) +// in the Key Management Service Developer Guide . For examples of working with +// grants in several programming languages, see Programming grants (https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html) +// . Cross-account use: Yes. To perform this operation on a KMS key in a different // Amazon Web Services account, specify the key ARN in the value of the KeyId -// parameter. Required permissions: kms:RevokeGrant -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// parameter. Required permissions: kms:RevokeGrant (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (key policy). 
Related operations: -// -// * CreateGrant -// -// * ListGrants -// -// * -// ListRetirableGrants -// -// * RetireGrant +// - CreateGrant +// - ListGrants +// - ListRetirableGrants +// - RetireGrant func (c *Client) RevokeGrant(ctx context.Context, params *RevokeGrantInput, optFns ...func(*Options)) (*RevokeGrantOutput, error) { if params == nil { params = &RevokeGrantInput{} @@ -55,25 +45,20 @@ func (c *Client) RevokeGrant(ctx context.Context, params *RevokeGrantInput, optF type RevokeGrantInput struct { - // Identifies the grant to revoke. To get the grant ID, use CreateGrant, - // ListGrants, or ListRetirableGrants. + // Identifies the grant to revoke. To get the grant ID, use CreateGrant , + // ListGrants , or ListRetirableGrants . // // This member is required. GrantId *string - // A unique identifier for the KMS key associated with the grant. To get the key ID - // and key ARN for a KMS key, use ListKeys or DescribeKey. Specify the key ID or - // key ARN of the KMS key. To specify a KMS key in a different Amazon Web Services - // account, you must use the key ARN. For example: - // - // * Key ID: - // 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: - // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To - // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // A unique identifier for the KMS key associated with the grant. To get the key + // ID and key ARN for a KMS key, use ListKeys or DescribeKey . Specify the key ID + // or key ARN of the KMS key. To specify a KMS key in a different Amazon Web + // Services account, you must use the key ARN. For example: + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey . // // This member is required. KeyId *string @@ -139,6 +124,9 @@ func (c *Client) addOperationRevokeGrantMiddlewares(stack *middleware.Stack, opt if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRevokeGrant(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ScheduleKeyDeletion.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ScheduleKeyDeletion.go index 5d8b2891..10247684 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ScheduleKeyDeletion.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ScheduleKeyDeletion.go @@ -12,54 +12,48 @@ import ( "time" ) -// Schedules the deletion of a KMS key. By default, KMS applies a waiting period of -// 30 days, but you can specify a waiting period of 7-30 days. When this operation -// is successful, the key state of the KMS key changes to PendingDeletion and the -// key can't be used in any cryptographic operations. It remains in this state for -// the duration of the waiting period. Before the waiting period ends, you can use -// CancelKeyDeletion to cancel the deletion of the KMS key. After the waiting -// period ends, KMS deletes the KMS key, its key material, and all KMS data +// Schedules the deletion of a KMS key. By default, KMS applies a waiting period +// of 30 days, but you can specify a waiting period of 7-30 days. 
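As a usage sketch of the RetireGrant and RevokeGrant operations documented in the hunks above (not part of the patch; the key ARN, grant ID, and names are placeholders, and a configured *kms.Client is assumed):

package kmsexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// retireGrant ends a grant as the retiring or grantee principal, identifying it
// by the grant ID plus a key identifier. Alternatively, only the GrantToken
// returned by CreateGrant could be supplied.
func retireGrant(ctx context.Context, client *kms.Client, keyARN, grantID string) error {
	_, err := client.RetireGrant(ctx, &kms.RetireGrantInput{
		KeyId:   aws.String(keyARN),
		GrantId: aws.String(grantID),
	})
	return err
}

// revokeGrant ends a grant as a principal with kms:RevokeGrant on the key
// (key policy); both the grant ID and the key identifier are required.
func revokeGrant(ctx context.Context, client *kms.Client, keyARN, grantID string) error {
	_, err := client.RevokeGrant(ctx, &kms.RevokeGrantInput{
		KeyId:   aws.String(keyARN),
		GrantId: aws.String(grantID),
	})
	return err
}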
When this
+// operation is successful, the key state of the KMS key changes to PendingDeletion
+// and the key can't be used in any cryptographic operations. It remains in this
+// state for the duration of the waiting period. Before the waiting period ends,
+// you can use CancelKeyDeletion to cancel the deletion of the KMS key. After the
+// waiting period ends, KMS deletes the KMS key, its key material, and all KMS data
 // associated with it, including all aliases that refer to it. Deleting a KMS key
 // is a destructive and potentially dangerous operation. When a KMS key is deleted,
 // all data that was encrypted under the KMS key is unrecoverable. (The only
-// exception is a multi-Region replica key.) To prevent the use of a KMS key
-// without deleting it, use DisableKey. You can schedule the deletion of a
+// exception is a multi-Region replica key , or an asymmetric or HMAC KMS key with
+// imported key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys-managing.html#import-delete-key)
+// .) To prevent the use of a KMS key
+// without deleting it, use DisableKey . You can schedule the deletion of a
 // multi-Region primary key and its replica keys at any time. However, KMS will not
 // delete a multi-Region primary key with existing replica keys. If you schedule
 // the deletion of a primary key with replicas, its key state changes to
 // PendingReplicaDeletion and it cannot be replicated or used in cryptographic
 // operations. This status can continue indefinitely. When the last of its replicas
 // keys is deleted (not just scheduled), the key state of the primary key changes
-// to PendingDeletion and its waiting period (PendingWindowInDays) begins. For
-// details, see Deleting multi-Region keys
-// (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-delete.html)
+// to PendingDeletion and its waiting period ( PendingWindowInDays ) begins. For
+// details, see Deleting multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-delete.html)
 // in the Key Management Service Developer Guide. When KMS deletes a KMS key from
-// an CloudHSM key store
-// (https://docs.aws.amazon.com/kms/latest/developerguide/delete-cmk-keystore.html),
-// it makes a best effort to delete the associated key material from the associated
-// CloudHSM cluster. However, you might need to manually delete the orphaned key
-// material
-// (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-orphaned-key)
-// from the cluster and its backups. Deleting a KMS key from an external key store
-// (https://docs.aws.amazon.com/kms/latest/developerguide/delete-xks-key.html) has
-// no effect on the associated external key. However, for both types of custom key
-// stores, deleting a KMS key is destructive and irreversible. You cannot decrypt
-// ciphertext encrypted under the KMS key by using only its associated external key
-// or CloudHSM key. Also, you cannot recreate a KMS key in an external key store by
-// creating a new KMS key with the same key material. For more information about
-// scheduling a KMS key for deletion, see Deleting KMS keys
-// (https://docs.aws.amazon.com/kms/latest/developerguide/deleting-keys.html) in
-// the Key Management Service Developer Guide. The KMS key that you use for this
+// an CloudHSM key store (https://docs.aws.amazon.com/kms/latest/developerguide/delete-cmk-keystore.html)
+// , it makes a best effort to delete the associated key material from the
+// associated CloudHSM cluster. 
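The waiting-period behavior described in this ScheduleKeyDeletion hunk is easiest to see end to end. A minimal sketch, assuming a configured *kms.Client and a placeholder key ID; it uses the documented minimum window of 7 days and then cancels before the window elapses:

package kmsexamples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// scheduleThenCancelDeletion schedules a key for deletion with the minimum
// 7-day waiting period, prints the resulting key state and deletion date,
// then cancels the deletion while the key is still PendingDeletion.
func scheduleThenCancelDeletion(ctx context.Context, client *kms.Client, keyID string) error {
	out, err := client.ScheduleKeyDeletion(ctx, &kms.ScheduleKeyDeletionInput{
		KeyId:               aws.String(keyID),
		PendingWindowInDays: aws.Int32(7), // allowed range per the docs above: 7-30, default 30
	})
	if err != nil {
		return err
	}
	fmt.Printf("key %s is now %s, scheduled deletion on %v\n",
		aws.ToString(out.KeyId), out.KeyState, aws.ToTime(out.DeletionDate))

	// Before the waiting period ends, the deletion can still be cancelled,
	// which leaves the key in the Disabled state.
	_, err = client.CancelKeyDeletion(ctx, &kms.CancelKeyDeletionInput{KeyId: aws.String(keyID)})
	return err
}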
However, you might need to manually delete the +// orphaned key material (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-orphaned-key) +// from the cluster and its backups. Deleting a KMS key from an external key store (https://docs.aws.amazon.com/kms/latest/developerguide/delete-xks-key.html) +// has no effect on the associated external key. However, for both types of custom +// key stores, deleting a KMS key is destructive and irreversible. You cannot +// decrypt ciphertext encrypted under the KMS key by using only its associated +// external key or CloudHSM key. Also, you cannot recreate a KMS key in an external +// key store by creating a new KMS key with the same key material. For more +// information about scheduling a KMS key for deletion, see Deleting KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/deleting-keys.html) +// in the Key Management Service Developer Guide. The KMS key that you use for this // operation must be in a compatible key state. For details, see Key states of KMS // keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in // the Key Management Service Developer Guide. Cross-account use: No. You cannot // perform this operation on a KMS key in a different Amazon Web Services account. // Required permissions: kms:ScheduleKeyDeletion (key policy) Related operations -// -// * -// CancelKeyDeletion -// -// * DisableKey +// - CancelKeyDeletion +// - DisableKey func (c *Client) ScheduleKeyDeletion(ctx context.Context, params *ScheduleKeyDeletionInput, optFns ...func(*Options)) (*ScheduleKeyDeletionOutput, error) { if params == nil { params = &ScheduleKeyDeletionInput{} @@ -77,17 +71,12 @@ func (c *Client) ScheduleKeyDeletion(ctx context.Context, params *ScheduleKeyDel type ScheduleKeyDeletionInput struct { - // The unique identifier of the KMS key to delete. Specify the key ID or key ARN of - // the KMS key. For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key - // ARN: - // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To - // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // The unique identifier of the KMS key to delete. Specify the key ID or key ARN + // of the KMS key. For example: + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey . // // This member is required. KeyId *string @@ -97,7 +86,10 @@ type ScheduleKeyDeletionInput struct { // replica keys, the waiting period begins when the last of its replica keys is // deleted. Otherwise, the waiting period begins immediately. This value is // optional. If you include a value, it must be between 7 and 30, inclusive. If you - // do not include a value, it defaults to 30. + // do not include a value, it defaults to 30. You can use the + // kms:ScheduleKeyDeletionPendingWindowInDays (https://docs.aws.amazon.com/kms/latest/developerguide/conditions-kms.html#conditions-pending-deletion-window) + // condition key to further constrain the values that principals can specify in the + // PendingWindowInDays parameter. PendingWindowInDays *int32 noSmithyDocumentSerde @@ -111,15 +103,13 @@ type ScheduleKeyDeletionOutput struct { // deleted. 
DeletionDate *time.Time - // The Amazon Resource Name (key ARN - // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) - // of the KMS key whose deletion is scheduled. + // The Amazon Resource Name ( key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN) + // ) of the KMS key whose deletion is scheduled. KeyId *string // The current status of the KMS key. For more information about how key state - // affects the use of a KMS key, see Key states of KMS keys - // (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the - // Key Management Service Developer Guide. + // affects the use of a KMS key, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) + // in the Key Management Service Developer Guide. KeyState types.KeyState // The waiting period before the KMS key is deleted. If the KMS key is a @@ -185,6 +175,9 @@ func (c *Client) addOperationScheduleKeyDeletionMiddlewares(stack *middleware.St if err = stack.Initialize.Add(newServiceMetadataMiddleware_opScheduleKeyDeletion(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Sign.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Sign.go index 4c66cbd9..cb87a977 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Sign.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Sign.go @@ -15,8 +15,7 @@ import ( // for a message or message digest by using the private key in an asymmetric // signing KMS key. To verify the signature, use the Verify operation, or use the // public key in the same asymmetric KMS key outside of KMS. For information about -// asymmetric KMS keys, see Asymmetric KMS keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) +// asymmetric KMS keys, see Asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) // in the Key Management Service Developer Guide. Digital signatures are generated // and verified by using asymmetric key pair, such as an RSA or ECC pair that is // represented by an asymmetric KMS key. The key owner (or an authorized user) uses @@ -24,39 +23,32 @@ import ( // the message was signed with that particular private key and that the message // hasn't changed since it was signed. To use the Sign operation, provide the // following information: +// - Use the KeyId parameter to identify an asymmetric KMS key with a KeyUsage +// value of SIGN_VERIFY . To get the KeyUsage value of a KMS key, use the +// DescribeKey operation. The caller must have kms:Sign permission on the KMS +// key. +// - Use the Message parameter to specify the message or message digest to sign. +// You can submit messages of up to 4096 bytes. To sign a larger message, generate +// a hash digest of the message, and then provide the hash digest in the Message +// parameter. To indicate whether the message is a full message or a digest, use +// the MessageType parameter. +// - Choose a signing algorithm that is compatible with the KMS key. // -// * Use the KeyId parameter to identify an asymmetric KMS -// key with a KeyUsage value of SIGN_VERIFY. To get the KeyUsage value of a KMS -// key, use the DescribeKey operation. 
The caller must have kms:Sign permission on -// the KMS key. -// -// * Use the Message parameter to specify the message or message -// digest to sign. You can submit messages of up to 4096 bytes. To sign a larger -// message, generate a hash digest of the message, and then provide the hash digest -// in the Message parameter. To indicate whether the message is a full message or a -// digest, use the MessageType parameter. -// -// * Choose a signing algorithm that is -// compatible with the KMS key. -// -// When signing a message, be sure to record the KMS -// key and the signing algorithm. This information is required to verify the -// signature. Best practices recommend that you limit the time during which any -// signature is effective. This deters an attack where the actor uses a signed -// message to establish validity repeatedly or long after the message is -// superseded. Signatures do not include a timestamp, but you can include a -// timestamp in the signed message to help you detect when its time to refresh the -// signature. To verify the signature that this operation generates, use the Verify -// operation. Or use the GetPublicKey operation to download the public key and then -// use the public key to verify the signature outside of KMS. The KMS key that you -// use for this operation must be in a compatible key state. For details, see Key -// states of KMS keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the -// Key Management Service Developer Guide. Cross-account use: Yes. To perform this -// operation with a KMS key in a different Amazon Web Services account, specify the -// key ARN or alias ARN in the value of the KeyId parameter. Required permissions: -// kms:Sign -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// When signing a message, be sure to record the KMS key and the signing +// algorithm. This information is required to verify the signature. Best practices +// recommend that you limit the time during which any signature is effective. This +// deters an attack where the actor uses a signed message to establish validity +// repeatedly or long after the message is superseded. Signatures do not include a +// timestamp, but you can include a timestamp in the signed message to help you +// detect when its time to refresh the signature. To verify the signature that this +// operation generates, use the Verify operation. Or use the GetPublicKey +// operation to download the public key and then use the public key to verify the +// signature outside of KMS. The KMS key that you use for this operation must be in +// a compatible key state. For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) +// in the Key Management Service Developer Guide. Cross-account use: Yes. To +// perform this operation with a KMS key in a different Amazon Web Services +// account, specify the key ARN or alias ARN in the value of the KeyId parameter. +// Required permissions: kms:Sign (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (key policy) Related operations: Verify func (c *Client) Sign(ctx context.Context, params *SignInput, optFns ...func(*Options)) (*SignOutput, error) { if params == nil { @@ -75,58 +67,68 @@ func (c *Client) Sign(ctx context.Context, params *SignInput, optFns ...func(*Op type SignInput struct { - // Identifies an asymmetric KMS key. 
KMS uses the private key in the asymmetric KMS - // key to sign the message. The KeyUsage type of the KMS key must be SIGN_VERIFY. - // To find the KeyUsage of a KMS key, use the DescribeKey operation. To specify a - // KMS key, use its key ID, key ARN, alias name, or alias ARN. When using an alias - // name, prefix it with "alias/". To specify a KMS key in a different Amazon Web - // Services account, you must use the key ARN or alias ARN. For example: - // - // * Key ID: - // 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: - // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // * - // Alias name: alias/ExampleAlias - // - // * Alias ARN: - // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias - // - // To get the key ID and key - // ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias - // ARN, use ListAliases. + // Identifies an asymmetric KMS key. KMS uses the private key in the asymmetric + // KMS key to sign the message. The KeyUsage type of the KMS key must be + // SIGN_VERIFY . To find the KeyUsage of a KMS key, use the DescribeKey operation. + // To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. When + // using an alias name, prefix it with "alias/" . To specify a KMS key in a + // different Amazon Web Services account, you must use the key ARN or alias ARN. + // For example: + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // - Alias name: alias/ExampleAlias + // - Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey . To + // get the alias name and alias ARN, use ListAliases . // // This member is required. KeyId *string // Specifies the message or message digest to sign. Messages can be 0-4096 bytes. - // To sign a larger message, provide the message digest. If you provide a message, - // KMS generates a hash digest of the message and then signs it. + // To sign a larger message, provide a message digest. If you provide a message + // digest, use the DIGEST value of MessageType to prevent the digest from being + // hashed again while signing. // // This member is required. Message []byte // Specifies the signing algorithm to use when signing the message. Choose an // algorithm that is compatible with the type and size of the specified asymmetric - // KMS key. + // KMS key. When signing with RSA key pairs, RSASSA-PSS algorithms are preferred. + // We include RSASSA-PKCS1-v1_5 algorithms for compatibility with existing + // applications. // // This member is required. SigningAlgorithm types.SigningAlgorithmSpec // A list of grant tokens. Use a grant token when your permission to call this // operation comes from a new grant that has not yet achieved eventual consistency. - // For more information, see Grant token - // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Using a grant token - // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) + // For more information, see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) // in the Key Management Service Developer Guide. 
GrantTokens []string - // Tells KMS whether the value of the Message parameter is a message or message - // digest. The default value, RAW, indicates a message. To indicate a message - // digest, enter DIGEST. + // Tells KMS whether the value of the Message parameter should be hashed as part + // of the signing algorithm. Use RAW for unhashed messages; use DIGEST for message + // digests, which are already hashed. When the value of MessageType is RAW , KMS + // uses the standard signing algorithm, which begins with a hash function. When the + // value is DIGEST , KMS skips the hashing step in the signing algorithm. Use the + // DIGEST value only when the value of the Message parameter is a message digest. + // If you use the DIGEST value with an unhashed message, the security of the + // signing operation can be compromised. When the value of MessageType is DIGEST , + // the length of the Message value must match the length of hashed messages for + // the specified signing algorithm. You can submit a message digest and omit the + // MessageType or specify RAW so the digest is hashed again while signing. + // However, this can cause verification failures when verifying with a system that + // assumes a single hash. The hashing algorithm in that Sign uses is based on the + // SigningAlgorithm value. + // - Signing algorithms that end in SHA_256 use the SHA_256 hashing algorithm. + // - Signing algorithms that end in SHA_384 use the SHA_384 hashing algorithm. + // - Signing algorithms that end in SHA_512 use the SHA_512 hashing algorithm. + // - SM2DSA uses the SM3 hashing algorithm. For details, see Offline + // verification with SM2 key pairs (https://docs.aws.amazon.com/kms/latest/developerguide/asymmetric-key-specs.html#key-spec-sm-offline-verification) + // . MessageType types.MessageType noSmithyDocumentSerde @@ -134,26 +136,21 @@ type SignInput struct { type SignOutput struct { - // The Amazon Resource Name (key ARN - // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) - // of the asymmetric KMS key that was used to sign the message. + // The Amazon Resource Name ( key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN) + // ) of the asymmetric KMS key that was used to sign the message. KeyId *string // The cryptographic signature that was generated for the message. - // - // * When used - // with the supported RSA signing algorithms, the encoding of this value is defined - // by PKCS #1 in RFC 8017 (https://tools.ietf.org/html/rfc8017). - // - // * When used with - // the ECDSA_SHA_256, ECDSA_SHA_384, or ECDSA_SHA_512 signing algorithms, this - // value is a DER-encoded object as defined by ANS X9.62–2005 and RFC 3279 Section - // 2.2.3 (https://tools.ietf.org/html/rfc3279#section-2.2.3). This is the most - // commonly used signature format and is appropriate for most uses. - // - // When you use - // the HTTP API or the Amazon Web Services CLI, the value is Base64-encoded. - // Otherwise, it is not Base64-encoded. + // - When used with the supported RSA signing algorithms, the encoding of this + // value is defined by PKCS #1 in RFC 8017 (https://tools.ietf.org/html/rfc8017) + // . + // - When used with the ECDSA_SHA_256 , ECDSA_SHA_384 , or ECDSA_SHA_512 signing + // algorithms, this value is a DER-encoded object as defined by ANSI X9.62–2005 and + // RFC 3279 Section 2.2.3 (https://tools.ietf.org/html/rfc3279#section-2.2.3) . 
+ // This is the most commonly used signature format and is appropriate for most + // uses. + // When you use the HTTP API or the Amazon Web Services CLI, the value is + // Base64-encoded. Otherwise, it is not Base64-encoded. Signature []byte // The signing algorithm that was used to sign the message. @@ -216,6 +213,9 @@ func (c *Client) addOperationSignMiddlewares(stack *middleware.Stack, options Op if err = stack.Initialize.Add(newServiceMetadataMiddleware_opSign(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_TagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_TagResource.go index c7358175..885e4ee4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_TagResource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_TagResource.go @@ -11,48 +11,34 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Adds or edits tags on a customer managed key -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk). -// Tagging or untagging a KMS key can allow or deny permission to the KMS key. For -// details, see ABAC for KMS -// (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) in the Key -// Management Service Developer Guide. Each tag consists of a tag key and a tag -// value, both of which are case-sensitive strings. The tag value can be an empty -// (null) string. To add a tag, specify a new tag key and a tag value. To edit a -// tag, specify an existing tag key and a new tag value. You can use this operation -// to tag a customer managed key -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk), -// but you cannot tag an Amazon Web Services managed key -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk), -// an Amazon Web Services owned key -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-owned-cmk), -// a custom key store -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#keystore-concept), -// or an alias -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#alias-concept). -// You can also add tags to a KMS key while creating it (CreateKey) or replicating -// it (ReplicateKey). For information about using tags in KMS, see Tagging keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html). For -// general information about tags, including the format and syntax, see Tagging -// Amazon Web Services resources -// (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the Amazon -// Web Services General Reference. The KMS key that you use for this operation must -// be in a compatible key state. For details, see Key states of KMS keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the -// Key Management Service Developer Guide. Cross-account use: No. You cannot +// Adds or edits tags on a customer managed key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk) +// . Tagging or untagging a KMS key can allow or deny permission to the KMS key. +// For details, see ABAC for KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) +// in the Key Management Service Developer Guide. 
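The MessageType discussion in the Sign hunk above is the part most likely to trip up callers, so here is a sketch of the digest path: hash locally, sign with MessageType DIGEST so KMS does not hash again, then verify with the same key and algorithm. Not part of the patch; the key ID is a placeholder and an ECC_NIST_P256 signing key with KeyUsage SIGN_VERIFY is assumed.

package kmsexamples

import (
	"context"
	"crypto/sha256"
	"errors"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// signDigestAndVerify hashes the message locally, signs the digest, and then
// verifies the signature with the same KMS key and signing algorithm.
func signDigestAndVerify(ctx context.Context, client *kms.Client, keyID string, message []byte) error {
	digest := sha256.Sum256(message) // length must match the SHA_256 algorithm family

	signOut, err := client.Sign(ctx, &kms.SignInput{
		KeyId:            aws.String(keyID),
		Message:          digest[:],
		MessageType:      types.MessageTypeDigest, // prevents KMS from hashing the digest again
		SigningAlgorithm: types.SigningAlgorithmSpecEcdsaSha256,
	})
	if err != nil {
		return err
	}

	verifyOut, err := client.Verify(ctx, &kms.VerifyInput{
		KeyId:            aws.String(keyID),
		Message:          digest[:],
		MessageType:      types.MessageTypeDigest,
		Signature:        signOut.Signature, // DER-encoded for ECDSA, per the output docs above
		SigningAlgorithm: types.SigningAlgorithmSpecEcdsaSha256,
	})
	if err != nil {
		return err
	}
	if !verifyOut.SignatureValid {
		return errors.New("signature did not verify")
	}
	return nil
}

Recording the key and algorithm alongside the signature, as the documentation above recommends, is what makes the later Verify call possible.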
Each tag consists of a tag key +// and a tag value, both of which are case-sensitive strings. The tag value can be +// an empty (null) string. To add a tag, specify a new tag key and a tag value. To +// edit a tag, specify an existing tag key and a new tag value. You can use this +// operation to tag a customer managed key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk) +// , but you cannot tag an Amazon Web Services managed key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk) +// , an Amazon Web Services owned key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-owned-cmk) +// , a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#keystore-concept) +// , or an alias (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#alias-concept) +// . You can also add tags to a KMS key while creating it ( CreateKey ) or +// replicating it ( ReplicateKey ). For information about using tags in KMS, see +// Tagging keys (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html) +// . For general information about tags, including the format and syntax, see +// Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) +// in the Amazon Web Services General Reference. The KMS key that you use for this +// operation must be in a compatible key state. For details, see Key states of KMS +// keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in +// the Key Management Service Developer Guide. Cross-account use: No. You cannot // perform this operation on a KMS key in a different Amazon Web Services account. -// Required permissions: kms:TagResource -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// Required permissions: kms:TagResource (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (key policy) Related operations -// -// * CreateKey -// -// * ListResourceTags -// -// * -// ReplicateKey -// -// * UntagResource +// - CreateKey +// - ListResourceTags +// - ReplicateKey +// - UntagResource func (c *Client) TagResource(ctx context.Context, params *TagResourceInput, optFns ...func(*Options)) (*TagResourceOutput, error) { if params == nil { params = &TagResourceInput{} @@ -72,21 +58,18 @@ type TagResourceInput struct { // Identifies a customer managed key in the account and Region. Specify the key ID // or key ARN of the KMS key. For example: - // - // * Key ID: - // 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: - // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To - // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey . // // This member is required. KeyId *string // One or more tags. Each tag consists of a tag key and a tag value. The tag value - // can be an empty (null) string. You cannot have more than one tag on a KMS key + // can be an empty (null) string. Do not include confidential or sensitive + // information in this field. This field may be displayed in plaintext in + // CloudTrail logs and other output. You cannot have more than one tag on a KMS key // with the same tag key. 
If you specify an existing tag key with a different tag // value, KMS replaces the current tag value with the specified one. // @@ -154,6 +137,9 @@ func (c *Client) addOperationTagResourceMiddlewares(stack *middleware.Stack, opt if err = stack.Initialize.Add(newServiceMetadataMiddleware_opTagResource(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UntagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UntagResource.go index 121e4abd..f4821ccc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UntagResource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UntagResource.go @@ -10,37 +10,28 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes tags from a customer managed key -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk). -// To delete a tag, specify the tag key and the KMS key. Tagging or untagging a KMS -// key can allow or deny permission to the KMS key. For details, see ABAC for KMS -// (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) in the Key +// Deletes tags from a customer managed key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk) +// . To delete a tag, specify the tag key and the KMS key. Tagging or untagging a +// KMS key can allow or deny permission to the KMS key. For details, see ABAC for +// KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) in the Key // Management Service Developer Guide. When it succeeds, the UntagResource // operation doesn't return any output. Also, if the specified tag key isn't found // on the KMS key, it doesn't throw an exception or return a response. To confirm // that the operation worked, use the ListResourceTags operation. For information -// about using tags in KMS, see Tagging keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html). For -// general information about tags, including the format and syntax, see Tagging -// Amazon Web Services resources -// (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the Amazon -// Web Services General Reference. The KMS key that you use for this operation must -// be in a compatible key state. For details, see Key states of KMS keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the -// Key Management Service Developer Guide. Cross-account use: No. You cannot +// about using tags in KMS, see Tagging keys (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html) +// . For general information about tags, including the format and syntax, see +// Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) +// in the Amazon Web Services General Reference. The KMS key that you use for this +// operation must be in a compatible key state. For details, see Key states of KMS +// keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in +// the Key Management Service Developer Guide. Cross-account use: No. You cannot // perform this operation on a KMS key in a different Amazon Web Services account. 
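A short sketch of the TagResource parameters documented above, assuming a configured *kms.Client and a placeholder customer managed key ID (not part of the patch):

package kmsexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// tagCustomerManagedKey adds tags, or overwrites the value of an existing tag
// key. Tag keys and values are case-sensitive and may appear in CloudTrail
// logs, so no confidential data is placed in them.
func tagCustomerManagedKey(ctx context.Context, client *kms.Client, keyID string) error {
	_, err := client.TagResource(ctx, &kms.TagResourceInput{
		KeyId: aws.String(keyID), // key ID or key ARN of a customer managed key
		Tags: []types.Tag{
			{TagKey: aws.String("cost-center"), TagValue: aws.String("12345")},
			{TagKey: aws.String("environment"), TagValue: aws.String("")}, // empty (null) value is allowed
		},
	})
	return err
}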
-// Required permissions: kms:UntagResource -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// Required permissions: kms:UntagResource (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (key policy) Related operations -// -// * CreateKey -// -// * ListResourceTags -// -// * -// ReplicateKey -// -// * TagResource +// - CreateKey +// - ListResourceTags +// - ReplicateKey +// - TagResource func (c *Client) UntagResource(ctx context.Context, params *UntagResourceInput, optFns ...func(*Options)) (*UntagResourceOutput, error) { if params == nil { params = &UntagResourceInput{} @@ -60,15 +51,10 @@ type UntagResourceInput struct { // Identifies the KMS key from which you are removing tags. Specify the key ID or // key ARN of the KMS key. For example: - // - // * Key ID: - // 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: - // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To - // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey . // // This member is required. KeyId *string @@ -139,6 +125,9 @@ func (c *Client) addOperationUntagResourceMiddlewares(stack *middleware.Stack, o if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUntagResource(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateAlias.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateAlias.go index 74d1a329..03f266df 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateAlias.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateAlias.go @@ -14,47 +14,35 @@ import ( // associated with only one KMS key at a time, although a KMS key can have multiple // aliases. The alias and the KMS key must be in the same Amazon Web Services // account and Region. Adding, deleting, or updating an alias can allow or deny -// permission to the KMS key. For details, see ABAC for KMS -// (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) in the Key -// Management Service Developer Guide. The current and new KMS key must be the same -// type (both symmetric or both asymmetric or both HMAC), and they must have the -// same key usage. This restriction prevents errors in code that uses aliases. If -// you must assign an alias to a different type of KMS key, use DeleteAlias to -// delete the old alias and CreateAlias to create a new alias. You cannot use -// UpdateAlias to change an alias name. To change an alias name, use DeleteAlias to -// delete the old alias and CreateAlias to create a new alias. Because an alias is -// not a property of a KMS key, you can create, update, and delete the aliases of a -// KMS key without affecting the KMS key. Also, aliases do not appear in the -// response from the DescribeKey operation. To get the aliases of all KMS keys in -// the account, use the ListAliases operation. The KMS key that you use for this -// operation must be in a compatible key state. 
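And the matching UntagResource call from the hunk above, under the same assumptions (configured *kms.Client, placeholder identifiers):

package kmsexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// untagCustomerManagedKey removes tags by tag key. A tag key that is not
// present is not an error, so ListResourceTags is the way to confirm the
// result, as the documentation above notes.
func untagCustomerManagedKey(ctx context.Context, client *kms.Client, keyID string, tagKeys ...string) error {
	_, err := client.UntagResource(ctx, &kms.UntagResourceInput{
		KeyId:   aws.String(keyID),
		TagKeys: tagKeys,
	})
	return err
}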
For details, see Key states of KMS -// keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in -// the Key Management Service Developer Guide. Cross-account use: No. You cannot +// permission to the KMS key. For details, see ABAC for KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) +// in the Key Management Service Developer Guide. The current and new KMS key must +// be the same type (both symmetric or both asymmetric or both HMAC), and they must +// have the same key usage. This restriction prevents errors in code that uses +// aliases. If you must assign an alias to a different type of KMS key, use +// DeleteAlias to delete the old alias and CreateAlias to create a new alias. You +// cannot use UpdateAlias to change an alias name. To change an alias name, use +// DeleteAlias to delete the old alias and CreateAlias to create a new alias. +// Because an alias is not a property of a KMS key, you can create, update, and +// delete the aliases of a KMS key without affecting the KMS key. Also, aliases do +// not appear in the response from the DescribeKey operation. To get the aliases +// of all KMS keys in the account, use the ListAliases operation. The KMS key that +// you use for this operation must be in a compatible key state. For details, see +// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) +// in the Key Management Service Developer Guide. Cross-account use: No. You cannot // perform this operation on a KMS key in a different Amazon Web Services account. // Required permissions +// - kms:UpdateAlias (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// on the alias (IAM policy). +// - kms:UpdateAlias (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// on the current KMS key (key policy). +// - kms:UpdateAlias (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// on the new KMS key (key policy). // -// * kms:UpdateAlias -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// on the alias (IAM policy). -// -// * kms:UpdateAlias -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// on the current KMS key (key policy). -// -// * kms:UpdateAlias -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// on the new KMS key (key policy). -// -// For details, see Controlling access to aliases -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html#alias-access) +// For details, see Controlling access to aliases (https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html#alias-access) // in the Key Management Service Developer Guide. Related operations: -// -// * -// CreateAlias -// -// * DeleteAlias -// -// * ListAliases +// - CreateAlias +// - DeleteAlias +// - ListAliases func (c *Client) UpdateAlias(ctx context.Context, params *UpdateAliasInput, optFns ...func(*Options)) (*UpdateAliasOutput, error) { if params == nil { params = &UpdateAliasInput{} @@ -73,31 +61,26 @@ func (c *Client) UpdateAlias(ctx context.Context, params *UpdateAliasInput, optF type UpdateAliasInput struct { // Identifies the alias that is changing its KMS key. This value must begin with - // alias/ followed by the alias name, such as alias/ExampleAlias. You cannot use - // UpdateAlias to change the alias name. 
+ // alias/ followed by the alias name, such as alias/ExampleAlias . You cannot use + // UpdateAlias to change the alias name. Do not include confidential or sensitive + // information in this field. This field may be displayed in plaintext in + // CloudTrail logs and other output. // // This member is required. AliasName *string - // Identifies the customer managed key - // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk) + // Identifies the customer managed key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk) // to associate with the alias. You don't have permission to associate an alias - // with an Amazon Web Services managed key - // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk). - // The KMS key must be in the same Amazon Web Services account and Region as the + // with an Amazon Web Services managed key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk) + // . The KMS key must be in the same Amazon Web Services account and Region as the // alias. Also, the new target KMS key must be the same type as the current target // KMS key (both symmetric or both asymmetric or both HMAC) and they must have the // same key usage. Specify the key ID or key ARN of the KMS key. For example: - // - // * - // Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: - // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To - // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. To verify - // that the alias is mapped to the correct KMS key, use ListAliases. + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey . To + // verify that the alias is mapped to the correct KMS key, use ListAliases . // // This member is required. TargetKeyId *string @@ -163,6 +146,9 @@ func (c *Client) addOperationUpdateAliasMiddlewares(stack *middleware.Stack, opt if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateAlias(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateCustomKeyStore.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateCustomKeyStore.go index 86b9d04c..a314404a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateCustomKeyStore.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateCustomKeyStore.go @@ -17,8 +17,7 @@ import ( // remaining optional parameters to change its properties. This operation does not // return any property values. To verify the updated property values, use the // DescribeCustomKeyStores operation. This operation is part of the custom key -// stores -// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) +// stores (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) // feature in KMS, which combines the convenience and extensive integration of KMS // with the isolation and control of a key store that you own and manage. 
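A usage sketch of the UpdateAlias operation documented above (not part of the patch; the alias name and target key ID are placeholders, and the existing and new target keys are assumed to have the same type and key usage):

package kmsexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// repointAlias associates an existing alias with a different KMS key.
// The alias name itself cannot be changed with this call; that requires
// DeleteAlias followed by CreateAlias.
func repointAlias(ctx context.Context, client *kms.Client, targetKeyID string) error {
	_, err := client.UpdateAlias(ctx, &kms.UpdateAliasInput{
		AliasName:   aws.String("alias/ExampleAlias"), // must already exist; "alias/" prefix required
		TargetKeyId: aws.String(targetKeyID),          // key ID or ARN in the same account and Region
	})
	return err
}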
When // updating the properties of an external key store, verify that the updated @@ -36,47 +35,40 @@ import ( // UpdateCustomKeyStore operation. However, you can use the file to help you // determine the correct values for the UpdateCustomKeyStore parameters. For an // CloudHSM key store, you can use this operation to change the custom key store -// friendly name (NewCustomKeyStoreName), to tell KMS about a change to the kmsuser -// crypto user password (KeyStorePassword), or to associate the custom key store -// with a different, but related, CloudHSM cluster (CloudHsmClusterId). To update -// any property of an CloudHSM key store, the ConnectionState of the CloudHSM key -// store must be DISCONNECTED. For an external key store, you can use this -// operation to change the custom key store friendly name (NewCustomKeyStoreName), -// or to tell KMS about a change to the external key store proxy authentication -// credentials (XksProxyAuthenticationCredential), connection method -// (XksProxyConnectivity), external proxy endpoint (XksProxyUriEndpoint) and path -// (XksProxyUriPath). For external key stores with an XksProxyConnectivity of -// VPC_ENDPOINT_SERVICE, you can also update the Amazon VPC endpoint service name -// (XksProxyVpcEndpointServiceName). To update most properties of an external key -// store, the ConnectionState of the external key store must be DISCONNECTED. -// However, you can update the CustomKeyStoreName, -// XksProxyAuthenticationCredential, and XksProxyUriPath of an external key store +// friendly name ( NewCustomKeyStoreName ), to tell KMS about a change to the +// kmsuser crypto user password ( KeyStorePassword ), or to associate the custom +// key store with a different, but related, CloudHSM cluster ( CloudHsmClusterId ). +// To update any property of an CloudHSM key store, the ConnectionState of the +// CloudHSM key store must be DISCONNECTED . For an external key store, you can use +// this operation to change the custom key store friendly name ( +// NewCustomKeyStoreName ), or to tell KMS about a change to the external key store +// proxy authentication credentials ( XksProxyAuthenticationCredential ), +// connection method ( XksProxyConnectivity ), external proxy endpoint ( +// XksProxyUriEndpoint ) and path ( XksProxyUriPath ). For external key stores with +// an XksProxyConnectivity of VPC_ENDPOINT_SERVICE , you can also update the Amazon +// VPC endpoint service name ( XksProxyVpcEndpointServiceName ). To update most +// properties of an external key store, the ConnectionState of the external key +// store must be DISCONNECTED . However, you can update the CustomKeyStoreName , +// XksProxyAuthenticationCredential , and XksProxyUriPath of an external key store // when it is in the CONNECTED or DISCONNECTED state. If your update requires a -// DISCONNECTED state, before using UpdateCustomKeyStore, use the +// DISCONNECTED state, before using UpdateCustomKeyStore , use the // DisconnectCustomKeyStore operation to disconnect the custom key store. After the // UpdateCustomKeyStore operation completes, use the ConnectCustomKeyStore to // reconnect the custom key store. To find the ConnectionState of the custom key -// store, use the DescribeCustomKeyStores operation. Before updating the custom key -// store, verify that the new values allow KMS to connect the custom key store to -// its backing key store. For example, before you change the XksProxyUriPath value, -// verify that the external key store proxy is reachable at the new path. 
If the -// operation succeeds, it returns a JSON object with no properties. Cross-account -// use: No. You cannot perform this operation on a custom key store in a different -// Amazon Web Services account. Required permissions: kms:UpdateCustomKeyStore -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// store, use the DescribeCustomKeyStores operation. Before updating the custom +// key store, verify that the new values allow KMS to connect the custom key store +// to its backing key store. For example, before you change the XksProxyUriPath +// value, verify that the external key store proxy is reachable at the new path. If +// the operation succeeds, it returns a JSON object with no properties. +// Cross-account use: No. You cannot perform this operation on a custom key store +// in a different Amazon Web Services account. Required permissions: +// kms:UpdateCustomKeyStore (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (IAM policy) Related operations: -// -// * ConnectCustomKeyStore -// -// * -// CreateCustomKeyStore -// -// * DeleteCustomKeyStore -// -// * DescribeCustomKeyStores -// -// * -// DisconnectCustomKeyStore +// - ConnectCustomKeyStore +// - CreateCustomKeyStore +// - DeleteCustomKeyStore +// - DescribeCustomKeyStores +// - DisconnectCustomKeyStore func (c *Client) UpdateCustomKeyStore(ctx context.Context, params *UpdateCustomKeyStoreInput, optFns ...func(*Options)) (*UpdateCustomKeyStoreOutput, error) { if params == nil { params = &UpdateCustomKeyStoreInput{} @@ -102,38 +94,38 @@ type UpdateCustomKeyStoreInput struct { CustomKeyStoreId *string // Associates the custom key store with a related CloudHSM cluster. This parameter - // is valid only for custom key stores with a CustomKeyStoreType of AWS_CLOUDHSM. + // is valid only for custom key stores with a CustomKeyStoreType of AWS_CLOUDHSM . // Enter the cluster ID of the cluster that you used to create the custom key store // or a cluster that shares a backup history and has the same cluster certificate // as the original cluster. You cannot use this parameter to associate a custom key // store with an unrelated cluster. In addition, the replacement cluster must - // fulfill the requirements - // (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore) + // fulfill the requirements (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore) // for a cluster associated with a custom key store. To view the cluster - // certificate of a cluster, use the DescribeClusters - // (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) + // certificate of a cluster, use the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) // operation. To change this value, the CloudHSM key store must be disconnected. CloudHsmClusterId *string // Enter the current password of the kmsuser crypto user (CU) in the CloudHSM // cluster that is associated with the custom key store. This parameter is valid - // only for custom key stores with a CustomKeyStoreType of AWS_CLOUDHSM. This + // only for custom key stores with a CustomKeyStoreType of AWS_CLOUDHSM . This // parameter tells KMS the current password of the kmsuser crypto user (CU). It // does not set or change the password of any users in the CloudHSM cluster. To // change this value, the CloudHSM key store must be disconnected. 
KeyStorePassword *string - // Changes the friendly name of the custom key store to the value that you specify. - // The custom key store name must be unique in the Amazon Web Services account. To + // Changes the friendly name of the custom key store to the value that you + // specify. The custom key store name must be unique in the Amazon Web Services + // account. Do not include confidential or sensitive information in this field. + // This field may be displayed in plaintext in CloudTrail logs and other output. To // change this value, an CloudHSM key store must be disconnected. An external key // store can be connected or disconnected. NewCustomKeyStoreName *string - // Changes the credentials that KMS uses to sign requests to the external key store - // proxy (XKS proxy). This parameter is valid only for custom key stores with a - // CustomKeyStoreType of EXTERNAL_KEY_STORE. You must specify both the AccessKeyId - // and SecretAccessKey value in the authentication credential, even if you are only - // updating one value. This parameter doesn't establish or change your + // Changes the credentials that KMS uses to sign requests to the external key + // store proxy (XKS proxy). This parameter is valid only for custom key stores with + // a CustomKeyStoreType of EXTERNAL_KEY_STORE . You must specify both the + // AccessKeyId and SecretAccessKey value in the authentication credential, even if + // you are only updating one value. This parameter doesn't establish or change your // authentication credentials on the proxy. It just tells KMS the credential that // you established with your external key store proxy. For example, if you rotate // the credential on your external key store proxy, you can use this parameter to @@ -143,10 +135,10 @@ type UpdateCustomKeyStoreInput struct { // Changes the connectivity setting for the external key store. To indicate that // the external key store proxy uses a Amazon VPC endpoint service to communicate - // with KMS, specify VPC_ENDPOINT_SERVICE. Otherwise, specify PUBLIC_ENDPOINT. If - // you change the XksProxyConnectivity to VPC_ENDPOINT_SERVICE, you must also + // with KMS, specify VPC_ENDPOINT_SERVICE . Otherwise, specify PUBLIC_ENDPOINT . If + // you change the XksProxyConnectivity to VPC_ENDPOINT_SERVICE , you must also // change the XksProxyUriEndpoint and add an XksProxyVpcEndpointServiceName value. - // If you change the XksProxyConnectivity to PUBLIC_ENDPOINT, you must also change + // If you change the XksProxyConnectivity to PUBLIC_ENDPOINT , you must also change // the XksProxyUriEndpoint and specify a null or empty string for the // XksProxyVpcEndpointServiceName value. To change this value, the external key // store must be disconnected. @@ -154,9 +146,9 @@ type UpdateCustomKeyStoreInput struct { // Changes the URI endpoint that KMS uses to connect to your external key store // proxy (XKS proxy). This parameter is valid only for custom key stores with a - // CustomKeyStoreType of EXTERNAL_KEY_STORE. For external key stores with an - // XksProxyConnectivity value of PUBLIC_ENDPOINT, the protocol must be HTTPS. For - // external key stores with an XksProxyConnectivity value of VPC_ENDPOINT_SERVICE, + // CustomKeyStoreType of EXTERNAL_KEY_STORE . For external key stores with an + // XksProxyConnectivity value of PUBLIC_ENDPOINT , the protocol must be HTTPS. 
For + // external key stores with an XksProxyConnectivity value of VPC_ENDPOINT_SERVICE , // specify https:// followed by the private DNS name associated with the VPC // endpoint service. Each external key store must use a different private DNS name. // The combined XksProxyUriEndpoint and XksProxyUriPath values must be unique in @@ -167,10 +159,10 @@ type UpdateCustomKeyStoreInput struct { // Changes the base path to the proxy APIs for this external key store. To find // this value, see the documentation for your external key manager and external key // store proxy (XKS proxy). This parameter is valid only for custom key stores with - // a CustomKeyStoreType of EXTERNAL_KEY_STORE. The value must start with / and must - // end with /kms/xks/v1, where v1 represents the version of the KMS external key - // store proxy API. You can include an optional prefix between the required - // elements such as /example/kms/xks/v1. The combined XksProxyUriEndpoint and + // a CustomKeyStoreType of EXTERNAL_KEY_STORE . The value must start with / and + // must end with /kms/xks/v1 , where v1 represents the version of the KMS external + // key store proxy API. You can include an optional prefix between the required + // elements such as /example/kms/xks/v1 . The combined XksProxyUriEndpoint and // XksProxyUriPath values must be unique in the Amazon Web Services account and // Region. You can change this value when the external key store is connected or // disconnected. @@ -179,7 +171,7 @@ type UpdateCustomKeyStoreInput struct { // Changes the name that KMS uses to identify the Amazon VPC endpoint service for // your external key store proxy (XKS proxy). This parameter is valid when the // CustomKeyStoreType is EXTERNAL_KEY_STORE and the XksProxyConnectivity is - // VPC_ENDPOINT_SERVICE. To change this value, the external key store must be + // VPC_ENDPOINT_SERVICE . To change this value, the external key store must be // disconnected. XksProxyVpcEndpointServiceName *string @@ -244,6 +236,9 @@ func (c *Client) addOperationUpdateCustomKeyStoreMiddlewares(stack *middleware.S if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateCustomKeyStore(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateKeyDescription.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateKeyDescription.go index b0700a41..2f7edbde 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateKeyDescription.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateKeyDescription.go @@ -11,18 +11,14 @@ import ( ) // Updates the description of a KMS key. To see the description of a KMS key, use -// DescribeKey. The KMS key that you use for this operation must be in a compatible -// key state. For details, see Key states of KMS keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the -// Key Management Service Developer Guide. Cross-account use: No. You cannot +// DescribeKey . The KMS key that you use for this operation must be in a +// compatible key state. For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) +// in the Key Management Service Developer Guide. Cross-account use: No. 
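The hunks above finish the reformatted documentation for UpdateCustomKeyStore and add the recursion-detection middleware to its operation stack. For orientation only, here is a minimal, illustrative sketch of calling the operation with the v2 SDK; the client setup is the usual config.LoadDefaultConfig path, and the key store ID and the new name are placeholders rather than values taken from this patch.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

func main() {
	ctx := context.Background()

	// Load region and credentials from the default sources (env, shared config, ...).
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatalf("load config: %v", err)
	}
	client := kms.NewFromConfig(cfg)

	// Rename a custom key store. The ID below is a placeholder. Per the
	// documentation above, an external key store may be renamed while
	// CONNECTED or DISCONNECTED, but a CloudHSM key store must first be
	// disconnected with DisconnectCustomKeyStore.
	_, err = client.UpdateCustomKeyStore(ctx, &kms.UpdateCustomKeyStoreInput{
		CustomKeyStoreId:      aws.String("cks-1234567890abcdef0"),
		NewCustomKeyStoreName: aws.String("ExampleKeyStore"),
	})
	if err != nil {
		log.Fatalf("UpdateCustomKeyStore: %v", err)
	}
	log.Println("custom key store updated")
}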
You cannot // perform this operation on a KMS key in a different Amazon Web Services account. -// Required permissions: kms:UpdateKeyDescription -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// Required permissions: kms:UpdateKeyDescription (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (key policy) Related operations -// -// * CreateKey -// -// * DescribeKey +// - CreateKey +// - DescribeKey func (c *Client) UpdateKeyDescription(ctx context.Context, params *UpdateKeyDescriptionInput, optFns ...func(*Options)) (*UpdateKeyDescriptionOutput, error) { if params == nil { params = &UpdateKeyDescriptionInput{} @@ -40,22 +36,19 @@ func (c *Client) UpdateKeyDescription(ctx context.Context, params *UpdateKeyDesc type UpdateKeyDescriptionInput struct { - // New description for the KMS key. + // New description for the KMS key. Do not include confidential or sensitive + // information in this field. This field may be displayed in plaintext in + // CloudTrail logs and other output. // // This member is required. Description *string // Updates the description of the specified KMS key. Specify the key ID or key ARN // of the KMS key. For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * - // Key ARN: - // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To - // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey . // // This member is required. KeyId *string @@ -121,6 +114,9 @@ func (c *Client) addOperationUpdateKeyDescriptionMiddlewares(stack *middleware.S if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateKeyDescription(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdatePrimaryRegion.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdatePrimaryRegion.go index e7b87d5d..49ed61da 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdatePrimaryRegion.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdatePrimaryRegion.go @@ -13,11 +13,10 @@ import ( // Changes the primary key of a multi-Region key. This operation changes the // replica key in the specified Region to a primary key and changes the former // primary key to a replica key. For example, suppose you have a primary key in -// us-east-1 and a replica key in eu-west-2. If you run UpdatePrimaryRegion with a -// PrimaryRegion value of eu-west-2, the primary key is now the key in eu-west-2, -// and the key in us-east-1 becomes a replica key. For details, see Updating the -// primary Region -// (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-update) +// us-east-1 and a replica key in eu-west-2 . If you run UpdatePrimaryRegion with +// a PrimaryRegion value of eu-west-2 , the primary key is now the key in eu-west-2 +// , and the key in us-east-1 becomes a replica key. 
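Likewise for UpdateKeyDescription, a small illustrative helper (the name updateDescription and its parameters are mine, not part of the SDK) showing the two required members of the input documented above.

package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// updateDescription sets a new description on the KMS key identified by keyID
// (key ID or key ARN). Both input members are required; as the updated
// documentation notes, the description may appear in plaintext in CloudTrail
// logs, so it should not contain sensitive data.
func updateDescription(ctx context.Context, client *kms.Client, keyID, description string) error {
	_, err := client.UpdateKeyDescription(ctx, &kms.UpdateKeyDescriptionInput{
		KeyId:       aws.String(keyID),
		Description: aws.String(description),
	})
	return err
}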
For details, see Updating the +// primary Region (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-update) // in the Key Management Service Developer Guide. This operation supports // multi-Region keys, an KMS feature that lets you create multiple interoperable // KMS keys in different Amazon Web Services Regions. Because these KMS keys have @@ -25,22 +24,15 @@ import ( // interchangeably to encrypt data in one Amazon Web Services Region and decrypt it // in a different Amazon Web Services Region without re-encrypting the data or // making a cross-Region call. For more information about multi-Region keys, see -// Multi-Region keys in KMS -// (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html) +// Multi-Region keys in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html) // in the Key Management Service Developer Guide. The primary key of a multi-Region // key is the source for properties that are always shared by primary and replica -// keys, including the key material, key ID -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-id), -// key spec -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-spec), -// key usage -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-usage), -// key material origin -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-origin), -// and automatic key rotation -// (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html). It's -// the only key that can be replicated. You cannot delete the primary key -// (https://docs.aws.amazon.com/kms/latest/APIReference/API_ScheduleKeyDeletion.html) +// keys, including the key material, key ID (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-id) +// , key spec (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-spec) +// , key usage (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-usage) +// , key material origin (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-origin) +// , and automatic key rotation (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html) +// . It's the only key that can be replicated. You cannot delete the primary key (https://docs.aws.amazon.com/kms/latest/APIReference/API_ScheduleKeyDeletion.html) // until all replica keys are deleted. The key ID and primary Region that you // specify uniquely identify the replica key that will become the primary key. The // primary Region must already have a replica key. This operation does not create a @@ -50,32 +42,25 @@ import ( // multi-Region keys in cryptographic operations. This operation should not delay, // interrupt, or cause failures in cryptographic operations. Even after this // operation completes, the process of updating the primary Region might still be -// in progress for a few more seconds. Operations such as DescribeKey might display -// both the old and new primary keys as replicas. The old and new primary keys have -// a transient key state of Updating. The original key state is restored when the -// update is complete. While the key state is Updating, you can use the keys in -// cryptographic operations, but you cannot replicate the new primary key or -// perform certain management operations, such as enabling or disabling these keys. 
-// For details about the Updating key state, see Key states of KMS keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the -// Key Management Service Developer Guide. This operation does not return any -// output. To verify that primary key is changed, use the DescribeKey operation. -// Cross-account use: No. You cannot use this operation in a different Amazon Web -// Services account. Required permissions: +// in progress for a few more seconds. Operations such as DescribeKey might +// display both the old and new primary keys as replicas. The old and new primary +// keys have a transient key state of Updating . The original key state is restored +// when the update is complete. While the key state is Updating , you can use the +// keys in cryptographic operations, but you cannot replicate the new primary key +// or perform certain management operations, such as enabling or disabling these +// keys. For details about the Updating key state, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) +// in the Key Management Service Developer Guide. This operation does not return +// any output. To verify that primary key is changed, use the DescribeKey +// operation. Cross-account use: No. You cannot use this operation in a different +// Amazon Web Services account. Required permissions: +// - kms:UpdatePrimaryRegion on the current primary key (in the primary key's +// Region). Include this permission primary key's key policy. +// - kms:UpdatePrimaryRegion on the current replica key (in the replica key's +// Region). Include this permission in the replica key's key policy. // -// * kms:UpdatePrimaryRegion on the -// current primary key (in the primary key's Region). Include this permission -// primary key's key policy. -// -// * kms:UpdatePrimaryRegion on the current replica key -// (in the replica key's Region). Include this permission in the replica key's key -// policy. -// -// # Related operations -// -// * CreateKey -// -// * ReplicateKey +// Related operations +// - CreateKey +// - ReplicateKey func (c *Client) UpdatePrimaryRegion(ctx context.Context, params *UpdatePrimaryRegionInput, optFns ...func(*Options)) (*UpdatePrimaryRegionOutput, error) { if params == nil { params = &UpdatePrimaryRegionInput{} @@ -96,22 +81,18 @@ type UpdatePrimaryRegionInput struct { // Identifies the current primary key. When the operation completes, this KMS key // will be a replica key. Specify the key ID or key ARN of a multi-Region primary // key. For example: - // - // * Key ID: mrk-1234abcd12ab34cd56ef1234567890ab - // - // * Key ARN: - // arn:aws:kms:us-east-2:111122223333:key/mrk-1234abcd12ab34cd56ef1234567890ab - // - // To - // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // - Key ID: mrk-1234abcd12ab34cd56ef1234567890ab + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/mrk-1234abcd12ab34cd56ef1234567890ab + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey . // // This member is required. KeyId *string - // The Amazon Web Services Region of the new primary key. Enter the Region ID, such - // as us-east-1 or ap-southeast-2. There must be an existing replica key in this - // Region. When the operation completes, the multi-Region key in this Region will - // be the primary key. + // The Amazon Web Services Region of the new primary key. Enter the Region ID, + // such as us-east-1 or ap-southeast-2 . There must be an existing replica key in + // this Region. 
When the operation completes, the multi-Region key in this Region + // will be the primary key. // // This member is required. PrimaryRegion *string @@ -177,6 +158,9 @@ func (c *Client) addOperationUpdatePrimaryRegionMiddlewares(stack *middleware.St if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdatePrimaryRegion(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Verify.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Verify.go index f118f5da..a94d689f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Verify.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Verify.go @@ -15,35 +15,33 @@ import ( // Verification confirms that an authorized user signed the message with the // specified KMS key and signing algorithm, and the message hasn't changed since it // was signed. If the signature is verified, the value of the SignatureValid field -// in the response is True. If the signature verification fails, the Verify +// in the response is True . If the signature verification fails, the Verify // operation fails with an KMSInvalidSignatureException exception. A digital // signature is generated by using the private key in an asymmetric KMS key. The // signature is verified by using the public key in the same asymmetric KMS key. -// For information about asymmetric KMS keys, see Asymmetric KMS keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) -// in the Key Management Service Developer Guide. To verify a digital signature, -// you can use the Verify operation. Specify the same asymmetric KMS key, message, -// and signing algorithm that were used to produce the signature. You can also +// For information about asymmetric KMS keys, see Asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) +// in the Key Management Service Developer Guide. To use the Verify operation, +// specify the same asymmetric KMS key, message, and signing algorithm that were +// used to produce the signature. The message type does not need to be the same as +// the one used for signing, but it must indicate whether the value of the Message +// parameter should be hashed as part of the verification process. You can also // verify the digital signature by using the public key of the KMS key outside of -// KMS. Use the GetPublicKey operation to download the public key in the asymmetric -// KMS key and then use the public key to verify the signature outside of KMS. The -// advantage of using the Verify operation is that it is performed within KMS. As a -// result, it's easy to call, the operation is performed within the FIPS boundary, -// it is logged in CloudTrail, and you can use key policy and IAM policy to -// determine who is authorized to use the KMS key to verify signatures. To verify a -// signature outside of KMS with an SM2 public key (China Regions only), you must -// specify the distinguishing ID. By default, KMS uses 1234567812345678 as the -// distinguishing ID. For more information, see Offline verification with SM2 key -// pairs -// (https://docs.aws.amazon.com/kms/latest/developerguide/asymmetric-key-specs.html#key-spec-sm-offline-verification). -// The KMS key that you use for this operation must be in a compatible key state. 
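A similar illustrative helper for UpdatePrimaryRegion; the function name promoteReplica is mine, and the expected arguments follow the documentation's own examples (an mrk-... key ID or ARN for the current primary, and a Region ID such as eu-west-2 that already hosts a replica).

package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// promoteReplica makes the replica key in newPrimaryRegion the primary key;
// the current primary identified by keyID becomes a replica. The change is
// asynchronous: DescribeKey can be used afterwards to confirm the swap once
// the transient Updating key state clears.
func promoteReplica(ctx context.Context, client *kms.Client, keyID, newPrimaryRegion string) error {
	_, err := client.UpdatePrimaryRegion(ctx, &kms.UpdatePrimaryRegionInput{
		KeyId:         aws.String(keyID),
		PrimaryRegion: aws.String(newPrimaryRegion),
	})
	return err
}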
-// For details, see Key states of KMS keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the -// Key Management Service Developer Guide. Cross-account use: Yes. To perform this -// operation with a KMS key in a different Amazon Web Services account, specify the -// key ARN or alias ARN in the value of the KeyId parameter. Required permissions: -// kms:Verify -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// KMS. Use the GetPublicKey operation to download the public key in the +// asymmetric KMS key and then use the public key to verify the signature outside +// of KMS. The advantage of using the Verify operation is that it is performed +// within KMS. As a result, it's easy to call, the operation is performed within +// the FIPS boundary, it is logged in CloudTrail, and you can use key policy and +// IAM policy to determine who is authorized to use the KMS key to verify +// signatures. To verify a signature outside of KMS with an SM2 public key (China +// Regions only), you must specify the distinguishing ID. By default, KMS uses +// 1234567812345678 as the distinguishing ID. For more information, see Offline +// verification with SM2 key pairs (https://docs.aws.amazon.com/kms/latest/developerguide/asymmetric-key-specs.html#key-spec-sm-offline-verification) +// . The KMS key that you use for this operation must be in a compatible key state. +// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) +// in the Key Management Service Developer Guide. Cross-account use: Yes. To +// perform this operation with a KMS key in a different Amazon Web Services +// account, specify the key ARN or alias ARN in the value of the KeyId parameter. +// Required permissions: kms:Verify (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (key policy) Related operations: Sign func (c *Client) Verify(ctx context.Context, params *VerifyInput, optFns ...func(*Options)) (*VerifyOutput, error) { if params == nil { @@ -66,31 +64,22 @@ type VerifyInput struct { // This must be the same KMS key that was used to generate the signature. If you // specify a different KMS key, the signature verification fails. To specify a KMS // key, use its key ID, key ARN, alias name, or alias ARN. When using an alias - // name, prefix it with "alias/". To specify a KMS key in a different Amazon Web + // name, prefix it with "alias/" . To specify a KMS key in a different Amazon Web // Services account, you must use the key ARN or alias ARN. For example: - // - // * Key ID: - // 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: - // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // * - // Alias name: alias/ExampleAlias - // - // * Alias ARN: - // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias - // - // To get the key ID and key - // ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias - // ARN, use ListAliases. + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // - Alias name: alias/ExampleAlias + // - Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey . To + // get the alias name and alias ARN, use ListAliases . // // This member is required. 
KeyId *string // Specifies the message that was signed. You can submit a raw message of up to // 4096 bytes, or a hash digest of the message. If you submit a digest, use the - // MessageType parameter with a value of DIGEST. If the message specified here is + // MessageType parameter with a value of DIGEST . If the message specified here is // different from the message that was signed, the signature verification fails. A // message and its hash digest are considered to be the same message. // @@ -110,18 +99,31 @@ type VerifyInput struct { // A list of grant tokens. Use a grant token when your permission to call this // operation comes from a new grant that has not yet achieved eventual consistency. - // For more information, see Grant token - // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Using a grant token - // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) + // For more information, see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) // in the Key Management Service Developer Guide. GrantTokens []string - // Tells KMS whether the value of the Message parameter is a message or message - // digest. The default value, RAW, indicates a message. To indicate a message - // digest, enter DIGEST. Use the DIGEST value only when the value of the Message - // parameter is a message digest. If you use the DIGEST value with a raw message, - // the security of the verification operation can be compromised. + // Tells KMS whether the value of the Message parameter should be hashed as part + // of the signing algorithm. Use RAW for unhashed messages; use DIGEST for message + // digests, which are already hashed. When the value of MessageType is RAW , KMS + // uses the standard signing algorithm, which begins with a hash function. When the + // value is DIGEST , KMS skips the hashing step in the signing algorithm. Use the + // DIGEST value only when the value of the Message parameter is a message digest. + // If you use the DIGEST value with an unhashed message, the security of the + // verification operation can be compromised. When the value of MessageType is + // DIGEST , the length of the Message value must match the length of hashed + // messages for the specified signing algorithm. You can submit a message digest + // and omit the MessageType or specify RAW so the digest is hashed again while + // signing. However, if the signed message is hashed once while signing, but twice + // while verifying, verification fails, even when the message hasn't changed. The + // hashing algorithm in that Verify uses is based on the SigningAlgorithm value. + // - Signing algorithms that end in SHA_256 use the SHA_256 hashing algorithm. + // - Signing algorithms that end in SHA_384 use the SHA_384 hashing algorithm. + // - Signing algorithms that end in SHA_512 use the SHA_512 hashing algorithm. + // - SM2DSA uses the SM3 hashing algorithm. For details, see Offline + // verification with SM2 key pairs (https://docs.aws.amazon.com/kms/latest/developerguide/asymmetric-key-specs.html#key-spec-sm-offline-verification) + // . 
MessageType types.MessageType noSmithyDocumentSerde @@ -129,9 +131,8 @@ type VerifyInput struct { type VerifyOutput struct { - // The Amazon Resource Name (key ARN - // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) - // of the asymmetric KMS key that was used to verify the signature. + // The Amazon Resource Name ( key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN) + // ) of the asymmetric KMS key that was used to verify the signature. KeyId *string // A Boolean value that indicates whether the signature was verified. A value of @@ -200,6 +201,9 @@ func (c *Client) addOperationVerifyMiddlewares(stack *middleware.Stack, options if err = stack.Initialize.Add(newServiceMetadataMiddleware_opVerify(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_VerifyMac.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_VerifyMac.go index bbd63d79..1e36fb58 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_VerifyMac.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_VerifyMac.go @@ -12,24 +12,23 @@ import ( ) // Verifies the hash-based message authentication code (HMAC) for a specified -// message, HMAC KMS key, and MAC algorithm. To verify the HMAC, VerifyMac computes -// an HMAC using the message, HMAC KMS key, and MAC algorithm that you specify, and -// compares the computed HMAC to the HMAC that you specify. If the HMACs are -// identical, the verification succeeds; otherwise, it fails. Verification -// indicates that the message hasn't changed since the HMAC was calculated, and the -// specified key was used to generate and verify the HMAC. HMAC KMS keys and the -// HMAC algorithms that KMS uses conform to industry standards defined in RFC 2104 -// (https://datatracker.ietf.org/doc/html/rfc2104). This operation is part of KMS -// support for HMAC KMS keys. For details, see HMAC keys in KMS -// (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html) in the Key -// Management Service Developer Guide. The KMS key that you use for this operation -// must be in a compatible key state. For details, see Key states of KMS keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the -// Key Management Service Developer Guide. Cross-account use: Yes. To perform this -// operation with a KMS key in a different Amazon Web Services account, specify the -// key ARN or alias ARN in the value of the KeyId parameter. Required permissions: -// kms:VerifyMac -// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// message, HMAC KMS key, and MAC algorithm. To verify the HMAC, VerifyMac +// computes an HMAC using the message, HMAC KMS key, and MAC algorithm that you +// specify, and compares the computed HMAC to the HMAC that you specify. If the +// HMACs are identical, the verification succeeds; otherwise, it fails. +// Verification indicates that the message hasn't changed since the HMAC was +// calculated, and the specified key was used to generate and verify the HMAC. HMAC +// KMS keys and the HMAC algorithms that KMS uses conform to industry standards +// defined in RFC 2104 (https://datatracker.ietf.org/doc/html/rfc2104) . This +// operation is part of KMS support for HMAC KMS keys. 
For details, see HMAC keys +// in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html) in the +// Key Management Service Developer Guide. The KMS key that you use for this +// operation must be in a compatible key state. For details, see Key states of KMS +// keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in +// the Key Management Service Developer Guide. Cross-account use: Yes. To perform +// this operation with a KMS key in a different Amazon Web Services account, +// specify the key ARN or alias ARN in the value of the KeyId parameter. Required +// permissions: kms:VerifyMac (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (key policy) Related operations: GenerateMac func (c *Client) VerifyMac(ctx context.Context, params *VerifyMacInput, optFns ...func(*Options)) (*VerifyMacOutput, error) { if params == nil { @@ -48,8 +47,8 @@ func (c *Client) VerifyMac(ctx context.Context, params *VerifyMacInput, optFns . type VerifyMacInput struct { - // The KMS key that will be used in the verification. Enter a key ID of the KMS key - // that was used to generate the HMAC. If you identify a different KMS key, the + // The KMS key that will be used in the verification. Enter a key ID of the KMS + // key that was used to generate the HMAC. If you identify a different KMS key, the // VerifyMac operation fails. // // This member is required. @@ -79,10 +78,8 @@ type VerifyMacInput struct { // A list of grant tokens. Use a grant token when your permission to call this // operation comes from a new grant that has not yet achieved eventual consistency. - // For more information, see Grant token - // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Using a grant token - // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) + // For more information, see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) // in the Key Management Service Developer Guide. GrantTokens []string @@ -98,8 +95,8 @@ type VerifyMacOutput struct { MacAlgorithm types.MacAlgorithmSpec // A Boolean value that indicates whether the HMAC was verified. A value of True - // indicates that the HMAC (Mac) was generated with the specified Message, HMAC KMS - // key (KeyID) and MacAlgorithm.. If the HMAC is not verified, the VerifyMac + // indicates that the HMAC ( Mac ) was generated with the specified Message , HMAC + // KMS key ( KeyID ) and MacAlgorithm. . If the HMAC is not verified, the VerifyMac // operation fails with a KMSInvalidMacException exception. This exception // indicates that one or more of the inputs changed since the HMAC was computed. 
MacValid bool @@ -161,6 +158,9 @@ func (c *Client) addOperationVerifyMacMiddlewares(stack *middleware.Stack, optio if err = stack.Initialize.Add(newServiceMetadataMiddleware_opVerifyMac(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/deserializers.go index 17e5b82a..4e3fbc1c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/deserializers.go @@ -88,9 +88,9 @@ func awsAwsjson11_deserializeOpErrorCancelKeyDeletion(response *smithyhttp.Respo errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -99,7 +99,7 @@ func awsAwsjson11_deserializeOpErrorCancelKeyDeletion(response *smithyhttp.Respo body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -111,8 +111,8 @@ func awsAwsjson11_deserializeOpErrorCancelKeyDeletion(response *smithyhttp.Respo } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -211,9 +211,9 @@ func awsAwsjson11_deserializeOpErrorConnectCustomKeyStore(response *smithyhttp.R errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -222,7 +222,7 @@ func awsAwsjson11_deserializeOpErrorConnectCustomKeyStore(response *smithyhttp.R body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -234,8 +234,8 @@ func awsAwsjson11_deserializeOpErrorConnectCustomKeyStore(response *smithyhttp.R } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -312,9 +312,9 @@ func awsAwsjson11_deserializeOpErrorCreateAlias(response *smithyhttp.Response, m errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -323,7 +323,7 @@ func 
awsAwsjson11_deserializeOpErrorCreateAlias(response *smithyhttp.Response, m body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -335,8 +335,8 @@ func awsAwsjson11_deserializeOpErrorCreateAlias(response *smithyhttp.Response, m } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -441,9 +441,9 @@ func awsAwsjson11_deserializeOpErrorCreateCustomKeyStore(response *smithyhttp.Re errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -452,7 +452,7 @@ func awsAwsjson11_deserializeOpErrorCreateCustomKeyStore(response *smithyhttp.Re body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -464,8 +464,8 @@ func awsAwsjson11_deserializeOpErrorCreateCustomKeyStore(response *smithyhttp.Re } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -600,9 +600,9 @@ func awsAwsjson11_deserializeOpErrorCreateGrant(response *smithyhttp.Response, m errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -611,7 +611,7 @@ func awsAwsjson11_deserializeOpErrorCreateGrant(response *smithyhttp.Response, m body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -623,8 +623,8 @@ func awsAwsjson11_deserializeOpErrorCreateGrant(response *smithyhttp.Response, m } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -732,9 +732,9 @@ func awsAwsjson11_deserializeOpErrorCreateKey(response *smithyhttp.Response, met errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -743,7 +743,7 @@ func 
awsAwsjson11_deserializeOpErrorCreateKey(response *smithyhttp.Response, met body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -755,8 +755,8 @@ func awsAwsjson11_deserializeOpErrorCreateKey(response *smithyhttp.Response, met } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -879,9 +879,9 @@ func awsAwsjson11_deserializeOpErrorDecrypt(response *smithyhttp.Response, metad errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -890,7 +890,7 @@ func awsAwsjson11_deserializeOpErrorDecrypt(response *smithyhttp.Response, metad body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -902,8 +902,8 @@ func awsAwsjson11_deserializeOpErrorDecrypt(response *smithyhttp.Response, metad } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -995,9 +995,9 @@ func awsAwsjson11_deserializeOpErrorDeleteAlias(response *smithyhttp.Response, m errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -1006,7 +1006,7 @@ func awsAwsjson11_deserializeOpErrorDeleteAlias(response *smithyhttp.Response, m body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -1018,8 +1018,8 @@ func awsAwsjson11_deserializeOpErrorDeleteAlias(response *smithyhttp.Response, m } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -1115,9 +1115,9 @@ func awsAwsjson11_deserializeOpErrorDeleteCustomKeyStore(response *smithyhttp.Re errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -1126,7 +1126,7 @@ func 
awsAwsjson11_deserializeOpErrorDeleteCustomKeyStore(response *smithyhttp.Re body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -1138,8 +1138,8 @@ func awsAwsjson11_deserializeOpErrorDeleteCustomKeyStore(response *smithyhttp.Re } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -1213,9 +1213,9 @@ func awsAwsjson11_deserializeOpErrorDeleteImportedKeyMaterial(response *smithyht errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -1224,7 +1224,7 @@ func awsAwsjson11_deserializeOpErrorDeleteImportedKeyMaterial(response *smithyht body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -1236,8 +1236,8 @@ func awsAwsjson11_deserializeOpErrorDeleteImportedKeyMaterial(response *smithyht } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -1339,9 +1339,9 @@ func awsAwsjson11_deserializeOpErrorDescribeCustomKeyStores(response *smithyhttp errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -1350,7 +1350,7 @@ func awsAwsjson11_deserializeOpErrorDescribeCustomKeyStores(response *smithyhttp body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -1362,8 +1362,8 @@ func awsAwsjson11_deserializeOpErrorDescribeCustomKeyStores(response *smithyhttp } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -1456,9 +1456,9 @@ func awsAwsjson11_deserializeOpErrorDescribeKey(response *smithyhttp.Response, m errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -1467,7 +1467,7 @@ func 
awsAwsjson11_deserializeOpErrorDescribeKey(response *smithyhttp.Response, m body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -1479,8 +1479,8 @@ func awsAwsjson11_deserializeOpErrorDescribeKey(response *smithyhttp.Response, m } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -1554,9 +1554,9 @@ func awsAwsjson11_deserializeOpErrorDisableKey(response *smithyhttp.Response, me errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -1565,7 +1565,7 @@ func awsAwsjson11_deserializeOpErrorDisableKey(response *smithyhttp.Response, me body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -1577,8 +1577,8 @@ func awsAwsjson11_deserializeOpErrorDisableKey(response *smithyhttp.Response, me } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -1655,9 +1655,9 @@ func awsAwsjson11_deserializeOpErrorDisableKeyRotation(response *smithyhttp.Resp errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -1666,7 +1666,7 @@ func awsAwsjson11_deserializeOpErrorDisableKeyRotation(response *smithyhttp.Resp body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -1678,8 +1678,8 @@ func awsAwsjson11_deserializeOpErrorDisableKeyRotation(response *smithyhttp.Resp } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -1784,9 +1784,9 @@ func awsAwsjson11_deserializeOpErrorDisconnectCustomKeyStore(response *smithyhtt errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -1795,7 +1795,7 @@ func 
awsAwsjson11_deserializeOpErrorDisconnectCustomKeyStore(response *smithyhtt body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -1807,8 +1807,8 @@ func awsAwsjson11_deserializeOpErrorDisconnectCustomKeyStore(response *smithyhtt } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -1879,9 +1879,9 @@ func awsAwsjson11_deserializeOpErrorEnableKey(response *smithyhttp.Response, met errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -1890,7 +1890,7 @@ func awsAwsjson11_deserializeOpErrorEnableKey(response *smithyhttp.Response, met body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -1902,8 +1902,8 @@ func awsAwsjson11_deserializeOpErrorEnableKey(response *smithyhttp.Response, met } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -1983,9 +1983,9 @@ func awsAwsjson11_deserializeOpErrorEnableKeyRotation(response *smithyhttp.Respo errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -1994,7 +1994,7 @@ func awsAwsjson11_deserializeOpErrorEnableKeyRotation(response *smithyhttp.Respo body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -2006,8 +2006,8 @@ func awsAwsjson11_deserializeOpErrorEnableKeyRotation(response *smithyhttp.Respo } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -2112,9 +2112,9 @@ func awsAwsjson11_deserializeOpErrorEncrypt(response *smithyhttp.Response, metad errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -2123,7 +2123,7 @@ func 
awsAwsjson11_deserializeOpErrorEncrypt(response *smithyhttp.Response, metad body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -2135,8 +2135,8 @@ func awsAwsjson11_deserializeOpErrorEncrypt(response *smithyhttp.Response, metad } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -2244,9 +2244,9 @@ func awsAwsjson11_deserializeOpErrorGenerateDataKey(response *smithyhttp.Respons errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -2255,7 +2255,7 @@ func awsAwsjson11_deserializeOpErrorGenerateDataKey(response *smithyhttp.Respons body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -2267,8 +2267,8 @@ func awsAwsjson11_deserializeOpErrorGenerateDataKey(response *smithyhttp.Respons } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -2376,9 +2376,9 @@ func awsAwsjson11_deserializeOpErrorGenerateDataKeyPair(response *smithyhttp.Res errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -2387,7 +2387,7 @@ func awsAwsjson11_deserializeOpErrorGenerateDataKeyPair(response *smithyhttp.Res body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -2399,8 +2399,8 @@ func awsAwsjson11_deserializeOpErrorGenerateDataKeyPair(response *smithyhttp.Res } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -2511,9 +2511,9 @@ func awsAwsjson11_deserializeOpErrorGenerateDataKeyPairWithoutPlaintext(response errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -2522,7 +2522,7 @@ func 
awsAwsjson11_deserializeOpErrorGenerateDataKeyPairWithoutPlaintext(response body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -2534,8 +2534,8 @@ func awsAwsjson11_deserializeOpErrorGenerateDataKeyPairWithoutPlaintext(response } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -2646,9 +2646,9 @@ func awsAwsjson11_deserializeOpErrorGenerateDataKeyWithoutPlaintext(response *sm errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -2657,7 +2657,7 @@ func awsAwsjson11_deserializeOpErrorGenerateDataKeyWithoutPlaintext(response *sm body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -2669,8 +2669,8 @@ func awsAwsjson11_deserializeOpErrorGenerateDataKeyWithoutPlaintext(response *sm } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -2778,9 +2778,9 @@ func awsAwsjson11_deserializeOpErrorGenerateMac(response *smithyhttp.Response, m errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -2789,7 +2789,7 @@ func awsAwsjson11_deserializeOpErrorGenerateMac(response *smithyhttp.Response, m body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -2801,8 +2801,8 @@ func awsAwsjson11_deserializeOpErrorGenerateMac(response *smithyhttp.Response, m } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -2907,9 +2907,9 @@ func awsAwsjson11_deserializeOpErrorGenerateRandom(response *smithyhttp.Response errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -2918,7 +2918,7 @@ func 
awsAwsjson11_deserializeOpErrorGenerateRandom(response *smithyhttp.Response body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -2930,8 +2930,8 @@ func awsAwsjson11_deserializeOpErrorGenerateRandom(response *smithyhttp.Response } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -3030,9 +3030,9 @@ func awsAwsjson11_deserializeOpErrorGetKeyPolicy(response *smithyhttp.Response, errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -3041,7 +3041,7 @@ func awsAwsjson11_deserializeOpErrorGetKeyPolicy(response *smithyhttp.Response, body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -3053,8 +3053,8 @@ func awsAwsjson11_deserializeOpErrorGetKeyPolicy(response *smithyhttp.Response, } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -3153,9 +3153,9 @@ func awsAwsjson11_deserializeOpErrorGetKeyRotationStatus(response *smithyhttp.Re errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -3164,7 +3164,7 @@ func awsAwsjson11_deserializeOpErrorGetKeyRotationStatus(response *smithyhttp.Re body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -3176,8 +3176,8 @@ func awsAwsjson11_deserializeOpErrorGetKeyRotationStatus(response *smithyhttp.Re } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -3279,9 +3279,9 @@ func awsAwsjson11_deserializeOpErrorGetParametersForImport(response *smithyhttp. 
errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -3290,7 +3290,7 @@ func awsAwsjson11_deserializeOpErrorGetParametersForImport(response *smithyhttp. body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -3302,8 +3302,8 @@ func awsAwsjson11_deserializeOpErrorGetParametersForImport(response *smithyhttp. } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -3405,9 +3405,9 @@ func awsAwsjson11_deserializeOpErrorGetPublicKey(response *smithyhttp.Response, errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -3416,7 +3416,7 @@ func awsAwsjson11_deserializeOpErrorGetPublicKey(response *smithyhttp.Response, body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -3428,8 +3428,8 @@ func awsAwsjson11_deserializeOpErrorGetPublicKey(response *smithyhttp.Response, } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -3543,9 +3543,9 @@ func awsAwsjson11_deserializeOpErrorImportKeyMaterial(response *smithyhttp.Respo errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -3554,7 +3554,7 @@ func awsAwsjson11_deserializeOpErrorImportKeyMaterial(response *smithyhttp.Respo body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -3566,8 +3566,8 @@ func awsAwsjson11_deserializeOpErrorImportKeyMaterial(response *smithyhttp.Respo } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -3681,9 +3681,9 @@ func awsAwsjson11_deserializeOpErrorListAliases(response *smithyhttp.Response, m errorCode := 
"UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -3692,7 +3692,7 @@ func awsAwsjson11_deserializeOpErrorListAliases(response *smithyhttp.Response, m body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -3704,8 +3704,8 @@ func awsAwsjson11_deserializeOpErrorListAliases(response *smithyhttp.Response, m } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -3804,9 +3804,9 @@ func awsAwsjson11_deserializeOpErrorListGrants(response *smithyhttp.Response, me errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -3815,7 +3815,7 @@ func awsAwsjson11_deserializeOpErrorListGrants(response *smithyhttp.Response, me body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -3827,8 +3827,8 @@ func awsAwsjson11_deserializeOpErrorListGrants(response *smithyhttp.Response, me } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -3933,9 +3933,9 @@ func awsAwsjson11_deserializeOpErrorListKeyPolicies(response *smithyhttp.Respons errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -3944,7 +3944,7 @@ func awsAwsjson11_deserializeOpErrorListKeyPolicies(response *smithyhttp.Respons body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -3956,8 +3956,8 @@ func awsAwsjson11_deserializeOpErrorListKeyPolicies(response *smithyhttp.Respons } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -4056,9 +4056,9 @@ func awsAwsjson11_deserializeOpErrorListKeys(response *smithyhttp.Response, meta errorCode := "UnknownError" 
errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -4067,7 +4067,7 @@ func awsAwsjson11_deserializeOpErrorListKeys(response *smithyhttp.Response, meta body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -4079,8 +4079,8 @@ func awsAwsjson11_deserializeOpErrorListKeys(response *smithyhttp.Response, meta } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -4173,9 +4173,9 @@ func awsAwsjson11_deserializeOpErrorListResourceTags(response *smithyhttp.Respon errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -4184,7 +4184,7 @@ func awsAwsjson11_deserializeOpErrorListResourceTags(response *smithyhttp.Respon body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -4196,8 +4196,8 @@ func awsAwsjson11_deserializeOpErrorListResourceTags(response *smithyhttp.Respon } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -4293,9 +4293,9 @@ func awsAwsjson11_deserializeOpErrorListRetirableGrants(response *smithyhttp.Res errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -4304,7 +4304,7 @@ func awsAwsjson11_deserializeOpErrorListRetirableGrants(response *smithyhttp.Res body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -4316,8 +4316,8 @@ func awsAwsjson11_deserializeOpErrorListRetirableGrants(response *smithyhttp.Res } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -4394,9 +4394,9 @@ func awsAwsjson11_deserializeOpErrorPutKeyPolicy(response *smithyhttp.Response, errorCode := "UnknownError" errorMessage := 
errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -4405,7 +4405,7 @@ func awsAwsjson11_deserializeOpErrorPutKeyPolicy(response *smithyhttp.Response, body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -4417,8 +4417,8 @@ func awsAwsjson11_deserializeOpErrorPutKeyPolicy(response *smithyhttp.Response, } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -4526,9 +4526,9 @@ func awsAwsjson11_deserializeOpErrorReEncrypt(response *smithyhttp.Response, met errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -4537,7 +4537,7 @@ func awsAwsjson11_deserializeOpErrorReEncrypt(response *smithyhttp.Response, met body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -4549,8 +4549,8 @@ func awsAwsjson11_deserializeOpErrorReEncrypt(response *smithyhttp.Response, met } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -4664,9 +4664,9 @@ func awsAwsjson11_deserializeOpErrorReplicateKey(response *smithyhttp.Response, errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -4675,7 +4675,7 @@ func awsAwsjson11_deserializeOpErrorReplicateKey(response *smithyhttp.Response, body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -4687,8 +4687,8 @@ func awsAwsjson11_deserializeOpErrorReplicateKey(response *smithyhttp.Response, } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -4780,9 +4780,9 @@ func awsAwsjson11_deserializeOpErrorRetireGrant(response *smithyhttp.Response, m errorCode := "UnknownError" errorMessage := errorCode - code := 
response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -4791,7 +4791,7 @@ func awsAwsjson11_deserializeOpErrorRetireGrant(response *smithyhttp.Response, m body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -4803,8 +4803,8 @@ func awsAwsjson11_deserializeOpErrorRetireGrant(response *smithyhttp.Response, m } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -4887,9 +4887,9 @@ func awsAwsjson11_deserializeOpErrorRevokeGrant(response *smithyhttp.Response, m errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -4898,7 +4898,7 @@ func awsAwsjson11_deserializeOpErrorRevokeGrant(response *smithyhttp.Response, m body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -4910,8 +4910,8 @@ func awsAwsjson11_deserializeOpErrorRevokeGrant(response *smithyhttp.Response, m } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -5013,9 +5013,9 @@ func awsAwsjson11_deserializeOpErrorScheduleKeyDeletion(response *smithyhttp.Res errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -5024,7 +5024,7 @@ func awsAwsjson11_deserializeOpErrorScheduleKeyDeletion(response *smithyhttp.Res body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -5036,8 +5036,8 @@ func awsAwsjson11_deserializeOpErrorScheduleKeyDeletion(response *smithyhttp.Res } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -5136,9 +5136,9 @@ func awsAwsjson11_deserializeOpErrorSign(response *smithyhttp.Response, metadata errorCode := "UnknownError" errorMessage := errorCode - code := 
response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -5147,7 +5147,7 @@ func awsAwsjson11_deserializeOpErrorSign(response *smithyhttp.Response, metadata body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -5159,8 +5159,8 @@ func awsAwsjson11_deserializeOpErrorSign(response *smithyhttp.Response, metadata } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -5246,9 +5246,9 @@ func awsAwsjson11_deserializeOpErrorTagResource(response *smithyhttp.Response, m errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -5257,7 +5257,7 @@ func awsAwsjson11_deserializeOpErrorTagResource(response *smithyhttp.Response, m body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -5269,8 +5269,8 @@ func awsAwsjson11_deserializeOpErrorTagResource(response *smithyhttp.Response, m } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -5350,9 +5350,9 @@ func awsAwsjson11_deserializeOpErrorUntagResource(response *smithyhttp.Response, errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -5361,7 +5361,7 @@ func awsAwsjson11_deserializeOpErrorUntagResource(response *smithyhttp.Response, body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -5373,8 +5373,8 @@ func awsAwsjson11_deserializeOpErrorUntagResource(response *smithyhttp.Response, } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -5451,9 +5451,9 @@ func awsAwsjson11_deserializeOpErrorUpdateAlias(response *smithyhttp.Response, m errorCode := "UnknownError" errorMessage := errorCode - code := 
response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -5462,7 +5462,7 @@ func awsAwsjson11_deserializeOpErrorUpdateAlias(response *smithyhttp.Response, m body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -5474,8 +5474,8 @@ func awsAwsjson11_deserializeOpErrorUpdateAlias(response *smithyhttp.Response, m } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -5574,9 +5574,9 @@ func awsAwsjson11_deserializeOpErrorUpdateCustomKeyStore(response *smithyhttp.Re errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -5585,7 +5585,7 @@ func awsAwsjson11_deserializeOpErrorUpdateCustomKeyStore(response *smithyhttp.Re body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -5597,8 +5597,8 @@ func awsAwsjson11_deserializeOpErrorUpdateCustomKeyStore(response *smithyhttp.Re } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -5711,9 +5711,9 @@ func awsAwsjson11_deserializeOpErrorUpdateKeyDescription(response *smithyhttp.Re errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -5722,7 +5722,7 @@ func awsAwsjson11_deserializeOpErrorUpdateKeyDescription(response *smithyhttp.Re body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -5734,8 +5734,8 @@ func awsAwsjson11_deserializeOpErrorUpdateKeyDescription(response *smithyhttp.Re } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -5812,9 +5812,9 @@ func awsAwsjson11_deserializeOpErrorUpdatePrimaryRegion(response *smithyhttp.Res errorCode := "UnknownError" errorMessage := errorCode - code := 
response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -5823,7 +5823,7 @@ func awsAwsjson11_deserializeOpErrorUpdatePrimaryRegion(response *smithyhttp.Res body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -5835,8 +5835,8 @@ func awsAwsjson11_deserializeOpErrorUpdatePrimaryRegion(response *smithyhttp.Res } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -5938,9 +5938,9 @@ func awsAwsjson11_deserializeOpErrorVerify(response *smithyhttp.Response, metada errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -5949,7 +5949,7 @@ func awsAwsjson11_deserializeOpErrorVerify(response *smithyhttp.Response, metada body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -5961,8 +5961,8 @@ func awsAwsjson11_deserializeOpErrorVerify(response *smithyhttp.Response, metada } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -6073,9 +6073,9 @@ func awsAwsjson11_deserializeOpErrorVerifyMac(response *smithyhttp.Response, met errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -6084,7 +6084,7 @@ func awsAwsjson11_deserializeOpErrorVerifyMac(response *smithyhttp.Response, met body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -6096,8 +6096,8 @@ func awsAwsjson11_deserializeOpErrorVerifyMac(response *smithyhttp.Response, met } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -11148,6 +11148,19 @@ func awsAwsjson11_deserializeOpDocumentDecryptOutput(v **DecryptOutput, value in for key, value := range shape { switch key { + case "CiphertextForRecipient": + if 
value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CiphertextType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode CiphertextType, %w", err) + } + sv.CiphertextForRecipient = dv + } + case "EncryptionAlgorithm": if value != nil { jtv, ok := value.(string) @@ -11437,6 +11450,19 @@ func awsAwsjson11_deserializeOpDocumentGenerateDataKeyOutput(v **GenerateDataKey sv.CiphertextBlob = dv } + case "CiphertextForRecipient": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CiphertextType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode CiphertextType, %w", err) + } + sv.CiphertextForRecipient = dv + } + case "KeyId": if value != nil { jtv, ok := value.(string) @@ -11490,6 +11516,19 @@ func awsAwsjson11_deserializeOpDocumentGenerateDataKeyPairOutput(v **GenerateDat for key, value := range shape { switch key { + case "CiphertextForRecipient": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CiphertextType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode CiphertextType, %w", err) + } + sv.CiphertextForRecipient = dv + } + case "KeyId": if value != nil { jtv, ok := value.(string) @@ -11768,6 +11807,19 @@ func awsAwsjson11_deserializeOpDocumentGenerateRandomOutput(v **GenerateRandomOu for key, value := range shape { switch key { + case "CiphertextForRecipient": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CiphertextType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode CiphertextType, %w", err) + } + sv.CiphertextForRecipient = dv + } + case "Plaintext": if value != nil { jtv, ok := value.(string) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/doc.go index 499e4889..266b4e11 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/doc.go @@ -1,78 +1,64 @@ // Code generated by smithy-go-codegen DO NOT EDIT. -// Package kms provides the API client, operations, and parameter types for AWS Key -// Management Service. +// Package kms provides the API client, operations, and parameter types for AWS +// Key Management Service. // // Key Management Service Key Management Service (KMS) is an encryption and key // management web service. This guide describes the KMS operations that you can -// call programmatically. For general information about KMS, see the Key -// Management Service Developer Guide -// (https://docs.aws.amazon.com/kms/latest/developerguide/). KMS has replaced the -// term customer master key (CMK) with KMS key and KMS key. The concept has not -// changed. To prevent breaking changes, KMS is keeping some variations of this -// term. Amazon Web Services provides SDKs that consist of libraries and sample -// code for various programming languages and platforms (Java, Ruby, .Net, macOS, -// Android, etc.). The SDKs provide a convenient way to create programmatic access -// to KMS and other Amazon Web Services services. 
For example, the SDKs take care -// of tasks such as signing requests (see below), managing errors, and retrying -// requests automatically. For more information about the Amazon Web Services SDKs, -// including how to download and install them, see Tools for Amazon Web Services -// (http://aws.amazon.com/tools/). We recommend that you use the Amazon Web -// Services SDKs to make programmatic API calls to KMS. If you need to use FIPS -// 140-2 validated cryptographic modules when communicating with Amazon Web -// Services, use the FIPS endpoint in your preferred Amazon Web Services Region. -// For more information about the available FIPS endpoints, see Service endpoints -// (https://docs.aws.amazon.com/general/latest/gr/kms.html#kms_region) in the Key -// Management Service topic of the Amazon Web Services General Reference. All KMS -// API calls must be signed and be transmitted using Transport Layer Security -// (TLS). KMS recommends you always use the latest supported TLS version. Clients -// must also support cipher suites with Perfect Forward Secrecy (PFS) such as -// Ephemeral Diffie-Hellman (DHE) or Elliptic Curve Ephemeral Diffie-Hellman +// call programmatically. For general information about KMS, see the Key +// Management Service Developer Guide (https://docs.aws.amazon.com/kms/latest/developerguide/) +// . KMS has replaced the term customer master key (CMK) with KMS key and KMS key. +// The concept has not changed. To prevent breaking changes, KMS is keeping some +// variations of this term. Amazon Web Services provides SDKs that consist of +// libraries and sample code for various programming languages and platforms (Java, +// Ruby, .Net, macOS, Android, etc.). The SDKs provide a convenient way to create +// programmatic access to KMS and other Amazon Web Services services. For example, +// the SDKs take care of tasks such as signing requests (see below), managing +// errors, and retrying requests automatically. For more information about the +// Amazon Web Services SDKs, including how to download and install them, see Tools +// for Amazon Web Services (http://aws.amazon.com/tools/) . We recommend that you +// use the Amazon Web Services SDKs to make programmatic API calls to KMS. If you +// need to use FIPS 140-2 validated cryptographic modules when communicating with +// Amazon Web Services, use the FIPS endpoint in your preferred Amazon Web Services +// Region. For more information about the available FIPS endpoints, see Service +// endpoints (https://docs.aws.amazon.com/general/latest/gr/kms.html#kms_region) in +// the Key Management Service topic of the Amazon Web Services General Reference. +// All KMS API calls must be signed and be transmitted using Transport Layer +// Security (TLS). KMS recommends you always use the latest supported TLS version. +// Clients must also support cipher suites with Perfect Forward Secrecy (PFS) such +// as Ephemeral Diffie-Hellman (DHE) or Elliptic Curve Ephemeral Diffie-Hellman // (ECDHE). Most modern systems such as Java 7 and later support these modes. -// Signing Requests Requests must be signed by using an access key ID and a secret +// Signing Requests Requests must be signed using an access key ID and a secret // access key. We strongly recommend that you do not use your Amazon Web Services -// account (root) access key ID and secret access key for everyday work with KMS. -// Instead, use the access key ID and secret access key for an IAM user. 
You can -// also use the Amazon Web Services Security Token Service to generate temporary -// security credentials that you can use to sign requests. All KMS operations -// require Signature Version 4 -// (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). -// Logging API Requests KMS supports CloudTrail, a service that logs Amazon Web +// account root access key ID and secret access key for everyday work. You can use +// the access key ID and secret access key for an IAM user or you can use the +// Security Token Service (STS) to generate temporary security credentials and use +// those to sign requests. All KMS requests must be signed with Signature Version 4 (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html) +// . Logging API Requests KMS supports CloudTrail, a service that logs Amazon Web // Services API calls and related events for your Amazon Web Services account and // delivers them to an Amazon S3 bucket that you specify. By using the information // collected by CloudTrail, you can determine what requests were made to KMS, who // made the request, when it was made, and so on. To learn more about CloudTrail, // including how to turn it on and find your log files, see the CloudTrail User -// Guide (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/). Additional +// Guide (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/) . Additional // Resources For more information about credentials and request signing, see the // following: +// - Amazon Web Services Security Credentials (https://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html) +// - This topic provides general information about the types of credentials used to +// access Amazon Web Services. +// - Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html) +// - This section of the IAM User Guide describes how to create and use temporary +// security credentials. +// - Signature Version 4 Signing Process (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html) +// - This set of topics walks you through the process of signing a request using an +// access key ID and a secret access key. // -// * Amazon Web Services Security Credentials -// (https://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html) - -// This topic provides general information about the types of credentials used to -// access Amazon Web Services. -// -// * Temporary Security Credentials -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html) - -// This section of the IAM User Guide describes how to create and use temporary -// security credentials. -// -// * Signature Version 4 Signing Process -// (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html) - This -// set of topics walks you through the process of signing a request using an access -// key ID and a secret access key. -// -// Commonly Used API Operations Of the API -// operations discussed in this guide, the following will prove the most useful for -// most applications. You will likely perform operations other than these, such as -// creating keys and assigning policies, by using the console. -// -// * Encrypt -// -// * -// Decrypt -// -// * GenerateDataKey -// -// * GenerateDataKeyWithoutPlaintext +// Commonly Used API Operations Of the API operations discussed in this guide, the +// following will prove the most useful for most applications. 
You will likely +// perform operations other than these, such as creating keys and assigning +// policies, by using the console. +// - Encrypt +// - Decrypt +// - GenerateDataKey +// - GenerateDataKeyWithoutPlaintext package kms diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/go_module_metadata.go index 9d0b8340..66a7229b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/go_module_metadata.go @@ -3,4 +3,4 @@ package kms // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.20.0" +const goModuleVersion = "1.22.2" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/internal/endpoints/endpoints.go index 538d1ba2..cb5a6b5a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/internal/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/internal/endpoints/endpoints.go @@ -89,6 +89,8 @@ var partitionRegexp = struct { AwsCn *regexp.Regexp AwsIso *regexp.Regexp AwsIsoB *regexp.Regexp + AwsIsoE *regexp.Regexp + AwsIsoF *regexp.Regexp AwsUsGov *regexp.Regexp }{ @@ -96,6 +98,8 @@ var partitionRegexp = struct { AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), + AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), + AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"), AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), } @@ -324,6 +328,15 @@ var defaultPartitions = endpoints.Partitions{ }, Deprecated: aws.TrueTernary, }, + endpoints.EndpointKey{ + Region: "ap-southeast-4", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-4", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.ap-southeast-4.amazonaws.com", + }, endpoints.EndpointKey{ Region: "ap-southeast-4-fips", }: endpoints.Endpoint{ @@ -495,6 +508,14 @@ var defaultPartitions = endpoints.Partitions{ }, Deprecated: aws.TrueTernary, }, + endpoints.EndpointKey{ + Region: "il-central-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.il-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "il-central-1", + }, + }, endpoints.EndpointKey{ Region: "me-central-1", }: endpoints.Endpoint{}, @@ -784,6 +805,48 @@ var defaultPartitions = endpoints.Partitions{ }, }, }, + { + ID: "aws-iso-e", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "kms.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoE, + IsRegionalized: true, + }, + { + ID: "aws-iso-f", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "kms.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoF, + IsRegionalized: true, + }, { ID: "aws-us-gov", Defaults: 
map[endpoints.DefaultKey]endpoints.Endpoint{ diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/serializers.go index 39d8aaa2..01a5d7c5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/serializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/serializers.go @@ -2819,6 +2819,23 @@ func awsAwsjson11_serializeDocumentGrantTokenList(v []string, value smithyjson.V return nil } +func awsAwsjson11_serializeDocumentRecipientInfo(v *types.RecipientInfo, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AttestationDocument != nil { + ok := object.Key("AttestationDocument") + ok.Base64EncodeBytes(v.AttestationDocument) + } + + if len(v.KeyEncryptionAlgorithm) > 0 { + ok := object.Key("KeyEncryptionAlgorithm") + ok.String(string(v.KeyEncryptionAlgorithm)) + } + + return nil +} + func awsAwsjson11_serializeDocumentTag(v *types.Tag, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -3122,6 +3139,13 @@ func awsAwsjson11_serializeOpDocumentDecryptInput(v *DecryptInput, value smithyj ok.String(*v.KeyId) } + if v.Recipient != nil { + ok := object.Key("Recipient") + if err := awsAwsjson11_serializeDocumentRecipientInfo(v.Recipient, ok); err != nil { + return err + } + } + return nil } @@ -3336,6 +3360,13 @@ func awsAwsjson11_serializeOpDocumentGenerateDataKeyInput(v *GenerateDataKeyInpu ok.Integer(*v.NumberOfBytes) } + if v.Recipient != nil { + ok := object.Key("Recipient") + if err := awsAwsjson11_serializeDocumentRecipientInfo(v.Recipient, ok); err != nil { + return err + } + } + return nil } @@ -3367,6 +3398,13 @@ func awsAwsjson11_serializeOpDocumentGenerateDataKeyPairInput(v *GenerateDataKey ok.String(string(v.KeyPairSpec)) } + if v.Recipient != nil { + ok := object.Key("Recipient") + if err := awsAwsjson11_serializeDocumentRecipientInfo(v.Recipient, ok); err != nil { + return err + } + } + return nil } @@ -3480,6 +3518,13 @@ func awsAwsjson11_serializeOpDocumentGenerateRandomInput(v *GenerateRandomInput, ok.Integer(*v.NumberOfBytes) } + if v.Recipient != nil { + ok := object.Key("Recipient") + if err := awsAwsjson11_serializeDocumentRecipientInfo(v.Recipient, ok); err != nil { + return err + } + } + return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/enums.go index 923e3abc..f5da8f5b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/enums.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/enums.go @@ -6,9 +6,11 @@ type AlgorithmSpec string // Enum values for AlgorithmSpec const ( - AlgorithmSpecRsaesPkcs1V15 AlgorithmSpec = "RSAES_PKCS1_V1_5" - AlgorithmSpecRsaesOaepSha1 AlgorithmSpec = "RSAES_OAEP_SHA_1" - AlgorithmSpecRsaesOaepSha256 AlgorithmSpec = "RSAES_OAEP_SHA_256" + AlgorithmSpecRsaesPkcs1V15 AlgorithmSpec = "RSAES_PKCS1_V1_5" + AlgorithmSpecRsaesOaepSha1 AlgorithmSpec = "RSAES_OAEP_SHA_1" + AlgorithmSpecRsaesOaepSha256 AlgorithmSpec = "RSAES_OAEP_SHA_256" + AlgorithmSpecRsaAesKeyWrapSha1 AlgorithmSpec = "RSA_AES_KEY_WRAP_SHA_1" + AlgorithmSpecRsaAesKeyWrapSha256 AlgorithmSpec = "RSA_AES_KEY_WRAP_SHA_256" ) // Values returns all known values for AlgorithmSpec. 
Note that this can be @@ -19,6 +21,8 @@ func (AlgorithmSpec) Values() []AlgorithmSpec { "RSAES_PKCS1_V1_5", "RSAES_OAEP_SHA_1", "RSAES_OAEP_SHA_256", + "RSA_AES_KEY_WRAP_SHA_1", + "RSA_AES_KEY_WRAP_SHA_256", } } @@ -115,8 +119,8 @@ const ( CustomerMasterKeySpecSm2 CustomerMasterKeySpec = "SM2" ) -// Values returns all known values for CustomerMasterKeySpec. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The +// Values returns all known values for CustomerMasterKeySpec. Note that this can +// be expanded in the future, and so it is only as up to date as the client. The // ordering of this slice is not guaranteed to be stable across updates. func (CustomerMasterKeySpec) Values() []CustomerMasterKeySpec { return []CustomerMasterKeySpec{ @@ -288,6 +292,22 @@ func (GrantOperation) Values() []GrantOperation { } } +type KeyEncryptionMechanism string + +// Enum values for KeyEncryptionMechanism +const ( + KeyEncryptionMechanismRsaesOaepSha256 KeyEncryptionMechanism = "RSAES_OAEP_SHA_256" +) + +// Values returns all known values for KeyEncryptionMechanism. Note that this can +// be expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (KeyEncryptionMechanism) Values() []KeyEncryptionMechanism { + return []KeyEncryptionMechanism{ + "RSAES_OAEP_SHA_256", + } +} + type KeyManagerType string // Enum values for KeyManagerType @@ -385,9 +405,9 @@ const ( KeyUsageTypeGenerateVerifyMac KeyUsageType = "GENERATE_VERIFY_MAC" ) -// Values returns all known values for KeyUsageType. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// Values returns all known values for KeyUsageType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. func (KeyUsageType) Values() []KeyUsageType { return []KeyUsageType{ "SIGN_VERIFY", @@ -515,6 +535,8 @@ type WrappingKeySpec string // Enum values for WrappingKeySpec const ( WrappingKeySpecRsa2048 WrappingKeySpec = "RSA_2048" + WrappingKeySpecRsa3072 WrappingKeySpec = "RSA_3072" + WrappingKeySpecRsa4096 WrappingKeySpec = "RSA_4096" ) // Values returns all known values for WrappingKeySpec. Note that this can be @@ -523,6 +545,8 @@ const ( func (WrappingKeySpec) Values() []WrappingKeySpec { return []WrappingKeySpec{ "RSA_2048", + "RSA_3072", + "RSA_4096", } } @@ -534,9 +558,9 @@ const ( XksProxyConnectivityTypeVpcEndpointService XksProxyConnectivityType = "VPC_ENDPOINT_SERVICE" ) -// Values returns all known values for XksProxyConnectivityType. Note that this can -// be expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// Values returns all known values for XksProxyConnectivityType. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// The ordering of this slice is not guaranteed to be stable across updates. 
func (XksProxyConnectivityType) Values() []XksProxyConnectivityType { return []XksProxyConnectivityType{ "PUBLIC_ENDPOINT", diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/errors.go index 07e8e492..ec1b379e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/errors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/errors.go @@ -27,7 +27,7 @@ func (e *AlreadyExistsException) ErrorMessage() string { return *e.Message } func (e *AlreadyExistsException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "AlreadyExistsException" } return *e.ErrorCodeOverride @@ -39,8 +39,7 @@ func (e *AlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy. // history with an CloudHSM key store in the account. Each CloudHSM key store in // the account must be associated with a different CloudHSM cluster. CloudHSM // clusters that share a backup history have the same cluster certificate. To view -// the cluster certificate of an CloudHSM cluster, use the DescribeClusters -// (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) +// the cluster certificate of an CloudHSM cluster, use the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) // operation. type CloudHsmClusterInUseException struct { Message *string @@ -60,7 +59,7 @@ func (e *CloudHsmClusterInUseException) ErrorMessage() string { return *e.Message } func (e *CloudHsmClusterInUseException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "CloudHsmClusterInUseException" } return *e.ErrorCodeOverride @@ -69,41 +68,30 @@ func (e *CloudHsmClusterInUseException) ErrorFault() smithy.ErrorFault { return // The request was rejected because the associated CloudHSM cluster did not meet // the configuration requirements for an CloudHSM key store. +// - The CloudHSM cluster must be configured with private subnets in at least +// two different Availability Zones in the Region. +// - The security group for the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html) +// (cloudhsm-cluster--sg) must include inbound rules and outbound rules that allow +// TCP traffic on ports 2223-2225. The Source in the inbound rules and the +// Destination in the outbound rules must match the security group ID. These rules +// are set by default when you create the CloudHSM cluster. Do not delete or change +// them. To get information about a particular security group, use the +// DescribeSecurityGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html) +// operation. +// - The CloudHSM cluster must contain at least as many HSMs as the operation +// requires. To add HSMs, use the CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html) +// operation. For the CreateCustomKeyStore , UpdateCustomKeyStore , and CreateKey +// operations, the CloudHSM cluster must have at least two active HSMs, each in a +// different Availability Zone. For the ConnectCustomKeyStore operation, the +// CloudHSM must contain at least one active HSM. // -// * The CloudHSM -// cluster must be configured with private subnets in at least two different -// Availability Zones in the Region. 
-// -// * The security group for the cluster -// (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html) -// (cloudhsm-cluster--sg) must include inbound rules and outbound rules that allow -// TCP traffic on ports 2223-2225. The Source in the inbound rules and the -// Destination in the outbound rules must match the security group ID. These rules -// are set by default when you create the CloudHSM cluster. Do not delete or change -// them. To get information about a particular security group, use the -// DescribeSecurityGroups -// (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html) -// operation. -// -// * The CloudHSM cluster must contain at least as many HSMs as the -// operation requires. To add HSMs, use the CloudHSM CreateHsm -// (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html) -// operation. For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey -// operations, the CloudHSM cluster must have at least two active HSMs, each in a -// different Availability Zone. For the ConnectCustomKeyStore operation, the -// CloudHSM must contain at least one active HSM. -// -// For information about the -// requirements for an CloudHSM cluster that is associated with an CloudHSM key -// store, see Assemble the Prerequisites -// (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore) +// For information about the requirements for an CloudHSM cluster that is +// associated with an CloudHSM key store, see Assemble the Prerequisites (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore) // in the Key Management Service Developer Guide. For information about creating a -// private subnet for an CloudHSM cluster, see Create a Private Subnet -// (https://docs.aws.amazon.com/cloudhsm/latest/userguide/create-subnets.html) in -// the CloudHSM User Guide. For information about cluster security groups, see -// Configure a Default Security Group -// (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html) in the -// CloudHSM User Guide . +// private subnet for an CloudHSM cluster, see Create a Private Subnet (https://docs.aws.amazon.com/cloudhsm/latest/userguide/create-subnets.html) +// in the CloudHSM User Guide. For information about cluster security groups, see +// Configure a Default Security Group (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html) +// in the CloudHSM User Guide . type CloudHsmClusterInvalidConfigurationException struct { Message *string @@ -122,7 +110,7 @@ func (e *CloudHsmClusterInvalidConfigurationException) ErrorMessage() string { return *e.Message } func (e *CloudHsmClusterInvalidConfigurationException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "CloudHsmClusterInvalidConfigurationException" } return *e.ErrorCodeOverride @@ -133,9 +121,8 @@ func (e *CloudHsmClusterInvalidConfigurationException) ErrorFault() smithy.Error // The request was rejected because the CloudHSM cluster associated with the // CloudHSM key store is not active. Initialize and activate the cluster and try -// the command again. For detailed instructions, see Getting Started -// (https://docs.aws.amazon.com/cloudhsm/latest/userguide/getting-started.html) in -// the CloudHSM User Guide. +// the command again. 
For detailed instructions, see Getting Started (https://docs.aws.amazon.com/cloudhsm/latest/userguide/getting-started.html) +// in the CloudHSM User Guide. type CloudHsmClusterNotActiveException struct { Message *string @@ -154,7 +141,7 @@ func (e *CloudHsmClusterNotActiveException) ErrorMessage() string { return *e.Message } func (e *CloudHsmClusterNotActiveException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "CloudHsmClusterNotActiveException" } return *e.ErrorCodeOverride @@ -181,7 +168,7 @@ func (e *CloudHsmClusterNotFoundException) ErrorMessage() string { return *e.Message } func (e *CloudHsmClusterNotFoundException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "CloudHsmClusterNotFoundException" } return *e.ErrorCodeOverride @@ -195,8 +182,7 @@ func (e *CloudHsmClusterNotFoundException) ErrorFault() smithy.ErrorFault { retu // clusters that were created from a backup of the current cluster, and clusters // that were created from the same backup that produced the current cluster. // CloudHSM clusters that share a backup history have the same cluster certificate. -// To view the cluster certificate of an CloudHSM cluster, use the DescribeClusters -// (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) +// To view the cluster certificate of an CloudHSM cluster, use the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) // operation. type CloudHsmClusterNotRelatedException struct { Message *string @@ -216,7 +202,7 @@ func (e *CloudHsmClusterNotRelatedException) ErrorMessage() string { return *e.Message } func (e *CloudHsmClusterNotRelatedException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "CloudHsmClusterNotRelatedException" } return *e.ErrorCodeOverride @@ -247,41 +233,34 @@ func (e *CustomKeyStoreHasCMKsException) ErrorMessage() string { return *e.Message } func (e *CustomKeyStoreHasCMKsException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "CustomKeyStoreHasCMKsException" } return *e.ErrorCodeOverride } func (e *CustomKeyStoreHasCMKsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// The request was rejected because of the ConnectionState of the custom key store. -// To get the ConnectionState of a custom key store, use the +// The request was rejected because of the ConnectionState of the custom key +// store. To get the ConnectionState of a custom key store, use the // DescribeCustomKeyStores operation. This exception is thrown under the following // conditions: -// -// * You requested the ConnectCustomKeyStore operation on a custom key -// store with a ConnectionState of DISCONNECTING or FAILED. This operation is valid -// for all other ConnectionState values. To reconnect a custom key store in a -// FAILED state, disconnect it (DisconnectCustomKeyStore), then connect it -// (ConnectCustomKeyStore). -// -// * You requested the CreateKey operation in a custom -// key store that is not connected. This operations is valid only when the custom -// key store ConnectionState is CONNECTED. -// -// * You requested the -// DisconnectCustomKeyStore operation on a custom key store with a ConnectionState -// of DISCONNECTING or DISCONNECTED. This operation is valid for all other -// ConnectionState values. 
-// -// * You requested the UpdateCustomKeyStore or -// DeleteCustomKeyStore operation on a custom key store that is not disconnected. -// This operation is valid only when the custom key store ConnectionState is -// DISCONNECTED. -// -// * You requested the GenerateRandom operation in an CloudHSM key -// store that is not connected. This operation is valid only when the CloudHSM key -// store ConnectionState is CONNECTED. +// - You requested the ConnectCustomKeyStore operation on a custom key store with +// a ConnectionState of DISCONNECTING or FAILED . This operation is valid for all +// other ConnectionState values. To reconnect a custom key store in a FAILED +// state, disconnect it ( DisconnectCustomKeyStore ), then connect it ( +// ConnectCustomKeyStore ). +// - You requested the CreateKey operation in a custom key store that is not +// connected. This operations is valid only when the custom key store +// ConnectionState is CONNECTED . +// - You requested the DisconnectCustomKeyStore operation on a custom key store +// with a ConnectionState of DISCONNECTING or DISCONNECTED . This operation is +// valid for all other ConnectionState values. +// - You requested the UpdateCustomKeyStore or DeleteCustomKeyStore operation on +// a custom key store that is not disconnected. This operation is valid only when +// the custom key store ConnectionState is DISCONNECTED . +// - You requested the GenerateRandom operation in an CloudHSM key store that is +// not connected. This operation is valid only when the CloudHSM key store +// ConnectionState is CONNECTED . type CustomKeyStoreInvalidStateException struct { Message *string @@ -300,7 +279,7 @@ func (e *CustomKeyStoreInvalidStateException) ErrorMessage() string { return *e.Message } func (e *CustomKeyStoreInvalidStateException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "CustomKeyStoreInvalidStateException" } return *e.ErrorCodeOverride @@ -330,7 +309,7 @@ func (e *CustomKeyStoreNameInUseException) ErrorMessage() string { return *e.Message } func (e *CustomKeyStoreNameInUseException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "CustomKeyStoreNameInUseException" } return *e.ErrorCodeOverride @@ -357,7 +336,7 @@ func (e *CustomKeyStoreNotFoundException) ErrorMessage() string { return *e.Message } func (e *CustomKeyStoreNotFoundException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "CustomKeyStoreNotFoundException" } return *e.ErrorCodeOverride @@ -384,7 +363,7 @@ func (e *DependencyTimeoutException) ErrorMessage() string { return *e.Message } func (e *DependencyTimeoutException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "DependencyTimeoutException" } return *e.ErrorCodeOverride @@ -410,7 +389,7 @@ func (e *DisabledException) ErrorMessage() string { return *e.Message } func (e *DisabledException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "DisabledException" } return *e.ErrorCodeOverride @@ -438,7 +417,7 @@ func (e *ExpiredImportTokenException) ErrorMessage() string { return *e.Message } func (e *ExpiredImportTokenException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "ExpiredImportTokenException" } return *e.ErrorCodeOverride @@ -466,7 +445,7 @@ func (e 
*IncorrectKeyException) ErrorMessage() string { return *e.Message } func (e *IncorrectKeyException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "IncorrectKeyException" } return *e.ErrorCodeOverride @@ -494,7 +473,7 @@ func (e *IncorrectKeyMaterialException) ErrorMessage() string { return *e.Message } func (e *IncorrectKeyMaterialException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "IncorrectKeyMaterialException" } return *e.ErrorCodeOverride @@ -503,9 +482,9 @@ func (e *IncorrectKeyMaterialException) ErrorFault() smithy.ErrorFault { return // The request was rejected because the trust anchor certificate in the request to // create an CloudHSM key store is not the trust anchor certificate for the -// specified CloudHSM cluster. When you initialize the CloudHSM cluster -// (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html#sign-csr), -// you create the trust anchor certificate and save it in the customerCA.crt file. +// specified CloudHSM cluster. When you initialize the CloudHSM cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html#sign-csr) +// , you create the trust anchor certificate and save it in the customerCA.crt +// file. type IncorrectTrustAnchorException struct { Message *string @@ -524,7 +503,7 @@ func (e *IncorrectTrustAnchorException) ErrorMessage() string { return *e.Message } func (e *IncorrectTrustAnchorException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "IncorrectTrustAnchorException" } return *e.ErrorCodeOverride @@ -550,7 +529,7 @@ func (e *InvalidAliasNameException) ErrorMessage() string { return *e.Message } func (e *InvalidAliasNameException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "InvalidAliasNameException" } return *e.ErrorCodeOverride @@ -577,7 +556,7 @@ func (e *InvalidArnException) ErrorMessage() string { return *e.Message } func (e *InvalidArnException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "InvalidArnException" } return *e.ErrorCodeOverride @@ -607,7 +586,7 @@ func (e *InvalidCiphertextException) ErrorMessage() string { return *e.Message } func (e *InvalidCiphertextException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "InvalidCiphertextException" } return *e.ErrorCodeOverride @@ -633,7 +612,7 @@ func (e *InvalidGrantIdException) ErrorMessage() string { return *e.Message } func (e *InvalidGrantIdException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "InvalidGrantIdException" } return *e.ErrorCodeOverride @@ -659,7 +638,7 @@ func (e *InvalidGrantTokenException) ErrorMessage() string { return *e.Message } func (e *InvalidGrantTokenException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "InvalidGrantTokenException" } return *e.ErrorCodeOverride @@ -686,7 +665,7 @@ func (e *InvalidImportTokenException) ErrorMessage() string { return *e.Message } func (e *InvalidImportTokenException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "InvalidImportTokenException" } return *e.ErrorCodeOverride @@ -694,21 +673,17 @@ func (e 
*InvalidImportTokenException) ErrorCode() string { func (e *InvalidImportTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // The request was rejected for one of the following reasons: +// - The KeyUsage value of the KMS key is incompatible with the API operation. +// - The encryption algorithm or signing algorithm specified for the operation +// is incompatible with the type of key material in the KMS key (KeySpec ). // -// * The KeyUsage value -// of the KMS key is incompatible with the API operation. -// -// * The encryption -// algorithm or signing algorithm specified for the operation is incompatible with -// the type of key material in the KMS key (KeySpec). -// -// For encrypting, decrypting, -// re-encrypting, and generating data keys, the KeyUsage must be ENCRYPT_DECRYPT. -// For signing and verifying messages, the KeyUsage must be SIGN_VERIFY. For -// generating and verifying message authentication codes (MACs), the KeyUsage must -// be GENERATE_VERIFY_MAC. To find the KeyUsage of a KMS key, use the DescribeKey -// operation. To find the encryption or signing algorithms supported for a -// particular KMS key, use the DescribeKey operation. +// For encrypting, decrypting, re-encrypting, and generating data keys, the +// KeyUsage must be ENCRYPT_DECRYPT . For signing and verifying messages, the +// KeyUsage must be SIGN_VERIFY . For generating and verifying message +// authentication codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC . To find +// the KeyUsage of a KMS key, use the DescribeKey operation. To find the +// encryption or signing algorithms supported for a particular KMS key, use the +// DescribeKey operation. type InvalidKeyUsageException struct { Message *string @@ -727,7 +702,7 @@ func (e *InvalidKeyUsageException) ErrorMessage() string { return *e.Message } func (e *InvalidKeyUsageException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "InvalidKeyUsageException" } return *e.ErrorCodeOverride @@ -754,7 +729,7 @@ func (e *InvalidMarkerException) ErrorMessage() string { return *e.Message } func (e *InvalidMarkerException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "InvalidMarkerException" } return *e.ErrorCodeOverride @@ -781,15 +756,15 @@ func (e *KeyUnavailableException) ErrorMessage() string { return *e.Message } func (e *KeyUnavailableException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "KeyUnavailableException" } return *e.ErrorCodeOverride } func (e *KeyUnavailableException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer } -// The request was rejected because an internal exception occurred. The request can -// be retried. +// The request was rejected because an internal exception occurred. The request +// can be retried. type KMSInternalException struct { Message *string @@ -808,16 +783,16 @@ func (e *KMSInternalException) ErrorMessage() string { return *e.Message } func (e *KMSInternalException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "KMSInternalException" } return *e.ErrorCodeOverride } func (e *KMSInternalException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer } -// The request was rejected because the HMAC verification failed. 
HMAC verification -// fails when the HMAC computed by using the specified message, HMAC KMS key, and -// MAC algorithm does not match the HMAC specified in the request. +// The request was rejected because the HMAC verification failed. HMAC +// verification fails when the HMAC computed by using the specified message, HMAC +// KMS key, and MAC algorithm does not match the HMAC specified in the request. type KMSInvalidMacException struct { Message *string @@ -836,7 +811,7 @@ func (e *KMSInvalidMacException) ErrorMessage() string { return *e.Message } func (e *KMSInvalidMacException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "KMSInvalidMacException" } return *e.ErrorCodeOverride @@ -864,7 +839,7 @@ func (e *KMSInvalidSignatureException) ErrorMessage() string { return *e.Message } func (e *KMSInvalidSignatureException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "KMSInvalidSignatureException" } return *e.ErrorCodeOverride @@ -873,18 +848,13 @@ func (e *KMSInvalidSignatureException) ErrorFault() smithy.ErrorFault { return s // The request was rejected because the state of the specified resource is not // valid for this request. This exceptions means one of the following: -// -// * The key -// state of the KMS key is not compatible with the operation. To find the key -// state, use the DescribeKey operation. For more information about which key -// states are compatible with each KMS operation, see Key states of KMS keys -// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the -// Key Management Service Developer Guide . -// -// * For cryptographic operations on KMS -// keys in custom key stores, this exception represents a general failure with many -// possible causes. To identify the cause, see the error message that accompanies -// the exception. +// - The key state of the KMS key is not compatible with the operation. To find +// the key state, use the DescribeKey operation. For more information about which +// key states are compatible with each KMS operation, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) +// in the Key Management Service Developer Guide . +// - For cryptographic operations on KMS keys in custom key stores, this +// exception represents a general failure with many possible causes. To identify +// the cause, see the error message that accompanies the exception. type KMSInvalidStateException struct { Message *string @@ -903,16 +873,16 @@ func (e *KMSInvalidStateException) ErrorMessage() string { return *e.Message } func (e *KMSInvalidStateException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "KMSInvalidStateException" } return *e.ErrorCodeOverride } func (e *KMSInvalidStateException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// The request was rejected because a quota was exceeded. For more information, see -// Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) in -// the Key Management Service Developer Guide. +// The request was rejected because a quota was exceeded. For more information, +// see Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) +// in the Key Management Service Developer Guide. 
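// Every ErrorCode method in this file now guards with e == nil, so calling it
// on a typed-nil exception pointer returns the static error code instead of
// panicking, and ErrorCodeOverride still takes precedence when set. A minimal
// sketch (not from the SDK itself) of that behaviour, assuming only the
// vendored package github.com/aws/aws-sdk-go-v2/service/kms/types.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

func main() {
	// Typed-nil pointer: previously a nil dereference, now the static code.
	var nf *types.NotFoundException
	fmt.Println(nf.ErrorCode()) // NotFoundException

	// With an override set, the override is returned as before.
	code := "ResourceNotFound"                 // hypothetical override value
	msg := "alias/example was not found"       // hypothetical message text
	withOverride := &types.NotFoundException{Message: &msg, ErrorCodeOverride: &code}
	fmt.Println(withOverride.ErrorCode(), "-", withOverride.ErrorMessage())
}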
type LimitExceededException struct { Message *string @@ -931,7 +901,7 @@ func (e *LimitExceededException) ErrorMessage() string { return *e.Message } func (e *LimitExceededException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "LimitExceededException" } return *e.ErrorCodeOverride @@ -958,7 +928,7 @@ func (e *MalformedPolicyDocumentException) ErrorMessage() string { return *e.Message } func (e *MalformedPolicyDocumentException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "MalformedPolicyDocumentException" } return *e.ErrorCodeOverride @@ -985,7 +955,7 @@ func (e *NotFoundException) ErrorMessage() string { return *e.Message } func (e *NotFoundException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "NotFoundException" } return *e.ErrorCodeOverride @@ -1011,7 +981,7 @@ func (e *TagException) ErrorMessage() string { return *e.Message } func (e *TagException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "TagException" } return *e.ErrorCodeOverride @@ -1038,16 +1008,16 @@ func (e *UnsupportedOperationException) ErrorMessage() string { return *e.Message } func (e *UnsupportedOperationException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "UnsupportedOperationException" } return *e.ErrorCodeOverride } func (e *UnsupportedOperationException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// The request was rejected because the (XksKeyId) is already associated with a KMS -// key in this external key store. Each KMS key in an external key store must be -// associated with a different external key. +// The request was rejected because the ( XksKeyId ) is already associated with a +// KMS key in this external key store. Each KMS key in an external key store must +// be associated with a different external key. type XksKeyAlreadyInUseException struct { Message *string @@ -1066,7 +1036,7 @@ func (e *XksKeyAlreadyInUseException) ErrorMessage() string { return *e.Message } func (e *XksKeyAlreadyInUseException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "XksKeyAlreadyInUseException" } return *e.ErrorCodeOverride @@ -1095,7 +1065,7 @@ func (e *XksKeyInvalidConfigurationException) ErrorMessage() string { return *e.Message } func (e *XksKeyInvalidConfigurationException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "XksKeyInvalidConfigurationException" } return *e.ErrorCodeOverride @@ -1104,13 +1074,13 @@ func (e *XksKeyInvalidConfigurationException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// The request was rejected because the external key store proxy could not find the -// external key. This exception is thrown when the value of the XksKeyId parameter -// doesn't identify a key in the external key manager associated with the external -// key proxy. Verify that the XksKeyId represents an existing key in the external -// key manager. Use the key identifier that the external key store proxy uses to -// identify the key. For details, see the documentation provided with your external -// key store proxy or key manager. +// The request was rejected because the external key store proxy could not find +// the external key. 
This exception is thrown when the value of the XksKeyId +// parameter doesn't identify a key in the external key manager associated with the +// external key proxy. Verify that the XksKeyId represents an existing key in the +// external key manager. Use the key identifier that the external key store proxy +// uses to identify the key. For details, see the documentation provided with your +// external key store proxy or key manager. type XksKeyNotFoundException struct { Message *string @@ -1129,18 +1099,18 @@ func (e *XksKeyNotFoundException) ErrorMessage() string { return *e.Message } func (e *XksKeyNotFoundException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "XksKeyNotFoundException" } return *e.ErrorCodeOverride } func (e *XksKeyNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// The request was rejected because the proxy credentials failed to authenticate to -// the specified external key store proxy. The specified external key store proxy -// rejected a status request from KMS due to invalid credentials. This can indicate -// an error in the credentials or in the identification of the external key store -// proxy. +// The request was rejected because the proxy credentials failed to authenticate +// to the specified external key store proxy. The specified external key store +// proxy rejected a status request from KMS due to invalid credentials. This can +// indicate an error in the credentials or in the identification of the external +// key store proxy. type XksProxyIncorrectAuthenticationCredentialException struct { Message *string @@ -1159,7 +1129,7 @@ func (e *XksProxyIncorrectAuthenticationCredentialException) ErrorMessage() stri return *e.Message } func (e *XksProxyIncorrectAuthenticationCredentialException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "XksProxyIncorrectAuthenticationCredentialException" } return *e.ErrorCodeOverride @@ -1189,7 +1159,7 @@ func (e *XksProxyInvalidConfigurationException) ErrorMessage() string { return *e.Message } func (e *XksProxyInvalidConfigurationException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "XksProxyInvalidConfigurationException" } return *e.ErrorCodeOverride @@ -1198,9 +1168,9 @@ func (e *XksProxyInvalidConfigurationException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// KMS cannot interpret the response it received from the external key store proxy. -// The problem might be a poorly constructed response, but it could also be a -// transient network issue. If you see this error repeatedly, report it to the +// KMS cannot interpret the response it received from the external key store +// proxy. The problem might be a poorly constructed response, but it could also be +// a transient network issue. If you see this error repeatedly, report it to the // proxy vendor. 
type XksProxyInvalidResponseException struct { Message *string @@ -1220,17 +1190,17 @@ func (e *XksProxyInvalidResponseException) ErrorMessage() string { return *e.Message } func (e *XksProxyInvalidResponseException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "XksProxyInvalidResponseException" } return *e.ErrorCodeOverride } func (e *XksProxyInvalidResponseException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// The request was rejected because the concatenation of the XksProxyUriEndpoint is -// already associated with an external key store in the Amazon Web Services account -// and Region. Each external key store in an account and Region must use a unique -// external key store proxy address. +// The request was rejected because the concatenation of the XksProxyUriEndpoint +// is already associated with an external key store in the Amazon Web Services +// account and Region. Each external key store in an account and Region must use a +// unique external key store proxy address. type XksProxyUriEndpointInUseException struct { Message *string @@ -1249,7 +1219,7 @@ func (e *XksProxyUriEndpointInUseException) ErrorMessage() string { return *e.Message } func (e *XksProxyUriEndpointInUseException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "XksProxyUriEndpointInUseException" } return *e.ErrorCodeOverride @@ -1278,14 +1248,14 @@ func (e *XksProxyUriInUseException) ErrorMessage() string { return *e.Message } func (e *XksProxyUriInUseException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "XksProxyUriInUseException" } return *e.ErrorCodeOverride } func (e *XksProxyUriInUseException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// KMS was unable to reach the specified XksProxyUriPath. The path must be +// KMS was unable to reach the specified XksProxyUriPath . The path must be // reachable before you create the external key store or update its settings. This // exception is also thrown when the external key store proxy response to a // GetHealthStatus request indicates that all external key manager instances are @@ -1308,7 +1278,7 @@ func (e *XksProxyUriUnreachableException) ErrorMessage() string { return *e.Message } func (e *XksProxyUriUnreachableException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "XksProxyUriUnreachableException" } return *e.ErrorCodeOverride @@ -1337,7 +1307,7 @@ func (e *XksProxyVpcEndpointServiceInUseException) ErrorMessage() string { return *e.Message } func (e *XksProxyVpcEndpointServiceInUseException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "XksProxyVpcEndpointServiceInUseException" } return *e.ErrorCodeOverride @@ -1368,7 +1338,7 @@ func (e *XksProxyVpcEndpointServiceInvalidConfigurationException) ErrorMessage() return *e.Message } func (e *XksProxyVpcEndpointServiceInvalidConfigurationException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "XksProxyVpcEndpointServiceInvalidConfigurationException" } return *e.ErrorCodeOverride @@ -1378,10 +1348,10 @@ func (e *XksProxyVpcEndpointServiceInvalidConfigurationException) ErrorFault() s } // The request was rejected because KMS could not find the specified VPC endpoint -// service. 
Use DescribeCustomKeyStores to verify the VPC endpoint service name for -// the external key store. Also, confirm that the Allow principals list for the VPC -// endpoint service includes the KMS service principal for the Region, such as -// cks.kms.us-east-1.amazonaws.com. +// service. Use DescribeCustomKeyStores to verify the VPC endpoint service name +// for the external key store. Also, confirm that the Allow principals list for +// the VPC endpoint service includes the KMS service principal for the Region, such +// as cks.kms.us-east-1.amazonaws.com . type XksProxyVpcEndpointServiceNotFoundException struct { Message *string @@ -1400,7 +1370,7 @@ func (e *XksProxyVpcEndpointServiceNotFoundException) ErrorMessage() string { return *e.Message } func (e *XksProxyVpcEndpointServiceNotFoundException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "XksProxyVpcEndpointServiceNotFoundException" } return *e.ErrorCodeOverride diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/types.go index 4d971e91..a5ec972e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/types.go @@ -13,7 +13,7 @@ type AliasListEntry struct { // String that contains the key ARN. AliasArn *string - // String that contains the alias. This value begins with alias/. + // String that contains the alias. This value begins with alias/ . AliasName *string // Date and time that the alias was most recently created in the account and @@ -34,185 +34,134 @@ type AliasListEntry struct { // Contains information about each custom key store in the custom key store list. type CustomKeyStoresListEntry struct { - // A unique identifier for the CloudHSM cluster that is associated with an CloudHSM - // key store. This field appears only when the CustomKeyStoreType is AWS_CLOUDHSM. + // A unique identifier for the CloudHSM cluster that is associated with an + // CloudHSM key store. This field appears only when the CustomKeyStoreType is + // AWS_CLOUDHSM . CloudHsmClusterId *string - // Describes the connection error. This field appears in the response only when the - // ConnectionState is FAILED. Many failures can be resolved by updating the + // Describes the connection error. This field appears in the response only when + // the ConnectionState is FAILED . Many failures can be resolved by updating the // properties of the custom key store. To update a custom key store, disconnect it - // (DisconnectCustomKeyStore), correct the errors (UpdateCustomKeyStore), and try - // to connect again (ConnectCustomKeyStore). For additional help resolving these - // errors, see How to Fix a Connection Failure - // (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-failed) + // ( DisconnectCustomKeyStore ), correct the errors ( UpdateCustomKeyStore ), and + // try to connect again ( ConnectCustomKeyStore ). For additional help resolving + // these errors, see How to Fix a Connection Failure (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-failed) // in Key Management Service Developer Guide. All custom key stores: - // - // * - // INTERNAL_ERROR — KMS could not complete the request due to an internal error. - // Retry the request. For ConnectCustomKeyStore requests, disconnect the custom key - // store before trying to connect again. 
- // - // * NETWORK_ERRORS — Network errors are - // preventing KMS from connecting the custom key store to its backing key - // store. - // + // - INTERNAL_ERROR — KMS could not complete the request due to an internal + // error. Retry the request. For ConnectCustomKeyStore requests, disconnect the + // custom key store before trying to connect again. + // - NETWORK_ERRORS — Network errors are preventing KMS from connecting the + // custom key store to its backing key store. // CloudHSM key stores: - // - // * CLUSTER_NOT_FOUND — KMS cannot find the CloudHSM - // cluster with the specified cluster ID. - // - // * INSUFFICIENT_CLOUDHSM_HSMS — The - // associated CloudHSM cluster does not contain any active HSMs. To connect a - // custom key store to its CloudHSM cluster, the cluster must contain at least one - // active HSM. - // - // * INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET — At least one private - // subnet associated with the CloudHSM cluster doesn't have any available IP - // addresses. A CloudHSM key store connection requires one free IP address in each - // of the associated private subnets, although two are preferable. For details, see - // How to Fix a Connection Failure - // (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-failed) - // in the Key Management Service Developer Guide. - // - // * INVALID_CREDENTIALS — The - // KeyStorePassword for the custom key store doesn't match the current password of - // the kmsuser crypto user in the CloudHSM cluster. Before you can connect your - // custom key store to its CloudHSM cluster, you must change the kmsuser account - // password and update the KeyStorePassword value for the custom key store. - // - // * - // SUBNET_NOT_FOUND — A subnet in the CloudHSM cluster configuration was deleted. - // If KMS cannot find all of the subnets in the cluster configuration, attempts to - // connect the custom key store to the CloudHSM cluster fail. To fix this error, - // create a cluster from a recent backup and associate it with your custom key - // store. (This process creates a new cluster configuration with a VPC and private - // subnets.) For details, see How to Fix a Connection Failure - // (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-failed) - // in the Key Management Service Developer Guide. - // - // * USER_LOCKED_OUT — The kmsuser - // CU account is locked out of the associated CloudHSM cluster due to too many - // failed password attempts. Before you can connect your custom key store to its - // CloudHSM cluster, you must change the kmsuser account password and update the - // key store password value for the custom key store. - // - // * USER_LOGGED_IN — The - // kmsuser CU account is logged into the associated CloudHSM cluster. This prevents - // KMS from rotating the kmsuser account password and logging into the cluster. - // Before you can connect your custom key store to its CloudHSM cluster, you must - // log the kmsuser CU out of the cluster. If you changed the kmsuser password to - // log into the cluster, you must also and update the key store password value for - // the custom key store. For help, see How to Log Out and Reconnect - // (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#login-kmsuser-2) - // in the Key Management Service Developer Guide. - // - // * USER_NOT_FOUND — KMS cannot - // find a kmsuser CU account in the associated CloudHSM cluster. 
Before you can - // connect your custom key store to its CloudHSM cluster, you must create a kmsuser - // CU account in the cluster, and then update the key store password value for the - // custom key store. - // + // - CLUSTER_NOT_FOUND — KMS cannot find the CloudHSM cluster with the specified + // cluster ID. + // - INSUFFICIENT_CLOUDHSM_HSMS — The associated CloudHSM cluster does not + // contain any active HSMs. To connect a custom key store to its CloudHSM cluster, + // the cluster must contain at least one active HSM. + // - INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET — At least one private subnet + // associated with the CloudHSM cluster doesn't have any available IP addresses. A + // CloudHSM key store connection requires one free IP address in each of the + // associated private subnets, although two are preferable. For details, see How + // to Fix a Connection Failure (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-failed) + // in the Key Management Service Developer Guide. + // - INVALID_CREDENTIALS — The KeyStorePassword for the custom key store doesn't + // match the current password of the kmsuser crypto user in the CloudHSM cluster. + // Before you can connect your custom key store to its CloudHSM cluster, you must + // change the kmsuser account password and update the KeyStorePassword value for + // the custom key store. + // - SUBNET_NOT_FOUND — A subnet in the CloudHSM cluster configuration was + // deleted. If KMS cannot find all of the subnets in the cluster configuration, + // attempts to connect the custom key store to the CloudHSM cluster fail. To fix + // this error, create a cluster from a recent backup and associate it with your + // custom key store. (This process creates a new cluster configuration with a VPC + // and private subnets.) For details, see How to Fix a Connection Failure (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-failed) + // in the Key Management Service Developer Guide. + // - USER_LOCKED_OUT — The kmsuser CU account is locked out of the associated + // CloudHSM cluster due to too many failed password attempts. Before you can + // connect your custom key store to its CloudHSM cluster, you must change the + // kmsuser account password and update the key store password value for the + // custom key store. + // - USER_LOGGED_IN — The kmsuser CU account is logged into the associated + // CloudHSM cluster. This prevents KMS from rotating the kmsuser account password + // and logging into the cluster. Before you can connect your custom key store to + // its CloudHSM cluster, you must log the kmsuser CU out of the cluster. If you + // changed the kmsuser password to log into the cluster, you must also and update + // the key store password value for the custom key store. For help, see How to + // Log Out and Reconnect (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#login-kmsuser-2) + // in the Key Management Service Developer Guide. + // - USER_NOT_FOUND — KMS cannot find a kmsuser CU account in the associated + // CloudHSM cluster. Before you can connect your custom key store to its CloudHSM + // cluster, you must create a kmsuser CU account in the cluster, and then update + // the key store password value for the custom key store. // External key stores: - // - // * INVALID_CREDENTIALS — One or both of - // the XksProxyAuthenticationCredential values is not valid on the specified - // external key store proxy. 
- // - // * XKS_PROXY_ACCESS_DENIED — KMS requests are denied - // access to the external key store proxy. If the external key store proxy has - // authorization rules, verify that they permit KMS to communicate with the proxy - // on your behalf. - // - // * XKS_PROXY_INVALID_CONFIGURATION — A configuration error is - // preventing the external key store from connecting to its proxy. Verify the value - // of the XksProxyUriPath. - // - // * XKS_PROXY_INVALID_RESPONSE — KMS cannot interpret the - // response from the external key store proxy. If you see this connection error - // code repeatedly, notify your external key store proxy vendor. - // - // * - // XKS_PROXY_INVALID_TLS_CONFIGURATION — KMS cannot connect to the external key - // store proxy because the TLS configuration is invalid. Verify that the XKS proxy - // supports TLS 1.2 or 1.3. Also, verify that the TLS certificate is not expired, - // and that it matches the hostname in the XksProxyUriEndpoint value, and that it - // is signed by a certificate authority included in the Trusted Certificate - // Authorities - // (https://github.com/aws/aws-kms-xksproxy-api-spec/blob/main/TrustedCertificateAuthorities) - // list. - // - // * XKS_PROXY_NOT_REACHABLE — KMS can't communicate with your external key - // store proxy. Verify that the XksProxyUriEndpoint and XksProxyUriPath are - // correct. Use the tools for your external key store proxy to verify that the - // proxy is active and available on its network. Also, verify that your external - // key manager instances are operating properly. Connection attempts fail with this - // connection error code if the proxy reports that all external key manager - // instances are unavailable. - // - // * XKS_PROXY_TIMED_OUT — KMS can connect to the - // external key store proxy, but the proxy does not respond to KMS in the time - // allotted. If you see this connection error code repeatedly, notify your external - // key store proxy vendor. - // - // * XKS_VPC_ENDPOINT_SERVICE_INVALID_CONFIGURATION — The - // Amazon VPC endpoint service configuration doesn't conform to the requirements - // for an KMS external key store. - // - // * The VPC endpoint service must be an endpoint - // service for interface endpoints in the caller's Amazon Web Services account. - // - // * - // It must have a network load balancer (NLB) connected to at least two subnets, - // each in a different Availability Zone. - // - // * The Allow principals list must include - // the KMS service principal for the Region, cks.kms..amazonaws.com, - // - // such as - // cks.kms.us-east-1.amazonaws.com. - // - // * It must not require acceptance - // (https://docs.aws.amazon.com/vpc/latest/privatelink/create-endpoint-service.html) - // of connection requests. - // - // * It must have a private DNS name. The private DNS name - // for an external key store with VPC_ENDPOINT_SERVICE connectivity must be unique - // in its Amazon Web Services Region. - // - // * The domain of the private DNS name must - // have a verification status - // (https://docs.aws.amazon.com/vpc/latest/privatelink/verify-domains.html) of - // verified. - // - // * The TLS certificate - // (https://docs.aws.amazon.com/elasticloadbalancing/latest/network/create-tls-listener.html) - // specifies the private DNS hostname at which the endpoint is reachable. - // - // * - // XKS_VPC_ENDPOINT_SERVICE_NOT_FOUND — KMS can't find the VPC endpoint service - // that it uses to communicate with the external key store proxy. 
Verify that the - // XksProxyVpcEndpointServiceName is correct and the KMS service principal has - // service consumer permissions on the Amazon VPC endpoint service. + // - INVALID_CREDENTIALS — One or both of the XksProxyAuthenticationCredential + // values is not valid on the specified external key store proxy. + // - XKS_PROXY_ACCESS_DENIED — KMS requests are denied access to the external key + // store proxy. If the external key store proxy has authorization rules, verify + // that they permit KMS to communicate with the proxy on your behalf. + // - XKS_PROXY_INVALID_CONFIGURATION — A configuration error is preventing the + // external key store from connecting to its proxy. Verify the value of the + // XksProxyUriPath . + // - XKS_PROXY_INVALID_RESPONSE — KMS cannot interpret the response from the + // external key store proxy. If you see this connection error code repeatedly, + // notify your external key store proxy vendor. + // - XKS_PROXY_INVALID_TLS_CONFIGURATION — KMS cannot connect to the external key + // store proxy because the TLS configuration is invalid. Verify that the XKS proxy + // supports TLS 1.2 or 1.3. Also, verify that the TLS certificate is not expired, + // and that it matches the hostname in the XksProxyUriEndpoint value, and that it + // is signed by a certificate authority included in the Trusted Certificate + // Authorities (https://github.com/aws/aws-kms-xksproxy-api-spec/blob/main/TrustedCertificateAuthorities) + // list. + // - XKS_PROXY_NOT_REACHABLE — KMS can't communicate with your external key store + // proxy. Verify that the XksProxyUriEndpoint and XksProxyUriPath are correct. + // Use the tools for your external key store proxy to verify that the proxy is + // active and available on its network. Also, verify that your external key manager + // instances are operating properly. Connection attempts fail with this connection + // error code if the proxy reports that all external key manager instances are + // unavailable. + // - XKS_PROXY_TIMED_OUT — KMS can connect to the external key store proxy, but + // the proxy does not respond to KMS in the time allotted. If you see this + // connection error code repeatedly, notify your external key store proxy vendor. + // - XKS_VPC_ENDPOINT_SERVICE_INVALID_CONFIGURATION — The Amazon VPC endpoint + // service configuration doesn't conform to the requirements for an KMS external + // key store. + // - The VPC endpoint service must be an endpoint service for interface + // endpoints in the caller's Amazon Web Services account. + // - It must have a network load balancer (NLB) connected to at least two + // subnets, each in a different Availability Zone. + // - The Allow principals list must include the KMS service principal for the + // Region, cks.kms..amazonaws.com , such as cks.kms.us-east-1.amazonaws.com . + // - It must not require acceptance (https://docs.aws.amazon.com/vpc/latest/privatelink/create-endpoint-service.html) + // of connection requests. + // - It must have a private DNS name. The private DNS name for an external key + // store with VPC_ENDPOINT_SERVICE connectivity must be unique in its Amazon Web + // Services Region. + // - The domain of the private DNS name must have a verification status (https://docs.aws.amazon.com/vpc/latest/privatelink/verify-domains.html) + // of verified . + // - The TLS certificate (https://docs.aws.amazon.com/elasticloadbalancing/latest/network/create-tls-listener.html) + // specifies the private DNS hostname at which the endpoint is reachable. 
+ // - XKS_VPC_ENDPOINT_SERVICE_NOT_FOUND — KMS can't find the VPC endpoint service + // that it uses to communicate with the external key store proxy. Verify that the + // XksProxyVpcEndpointServiceName is correct and the KMS service principal has + // service consumer permissions on the Amazon VPC endpoint service. ConnectionErrorCode ConnectionErrorCodeType // Indicates whether the custom key store is connected to its backing key store. - // For an CloudHSM key store, the ConnectionState indicates whether it is connected - // to its CloudHSM cluster. For an external key store, the ConnectionState - // indicates whether it is connected to the external key store proxy that - // communicates with your external key manager. You can create and use KMS keys in - // your custom key stores only when its ConnectionState is CONNECTED. The - // ConnectionState value is DISCONNECTED only if the key store has never been - // connected or you use the DisconnectCustomKeyStore operation to disconnect it. If - // the value is CONNECTED but you are having trouble using the custom key store, - // make sure that the backing key store is reachable and active. For an CloudHSM - // key store, verify that its associated CloudHSM cluster is active and contains at - // least one active HSM. For an external key store, verify that the external key - // store proxy and external key manager are connected and enabled. A value of - // FAILED indicates that an attempt to connect was unsuccessful. The + // For an CloudHSM key store, the ConnectionState indicates whether it is + // connected to its CloudHSM cluster. For an external key store, the + // ConnectionState indicates whether it is connected to the external key store + // proxy that communicates with your external key manager. You can create and use + // KMS keys in your custom key stores only when its ConnectionState is CONNECTED . + // The ConnectionState value is DISCONNECTED only if the key store has never been + // connected or you use the DisconnectCustomKeyStore operation to disconnect it. + // If the value is CONNECTED but you are having trouble using the custom key + // store, make sure that the backing key store is reachable and active. For an + // CloudHSM key store, verify that its associated CloudHSM cluster is active and + // contains at least one active HSM. For an external key store, verify that the + // external key store proxy and external key manager are connected and enabled. A + // value of FAILED indicates that an attempt to connect was unsuccessful. The // ConnectionErrorCode field in the response indicates the cause of the failure. - // For help resolving a connection failure, see Troubleshooting a custom key store - // (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html) in the - // Key Management Service Developer Guide. + // For help resolving a connection failure, see Troubleshooting a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html) + // in the Key Management Service Developer Guide. ConnectionState ConnectionStateType // The date and time when the custom key store was created. @@ -230,35 +179,31 @@ type CustomKeyStoresListEntry struct { // Amazon Web Services. CustomKeyStoreType CustomKeyStoreType - // The trust anchor certificate of the CloudHSM cluster associated with an CloudHSM - // key store. 
When you initialize the cluster - // (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html#sign-csr), - // you create this certificate and save it in the customerCA.crt file. This field - // appears only when the CustomKeyStoreType is AWS_CLOUDHSM. + // The trust anchor certificate of the CloudHSM cluster associated with an + // CloudHSM key store. When you initialize the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html#sign-csr) + // , you create this certificate and save it in the customerCA.crt file. This + // field appears only when the CustomKeyStoreType is AWS_CLOUDHSM . TrustAnchorCertificate *string // Configuration settings for the external key store proxy (XKS proxy). The // external key store proxy translates KMS requests into a format that your // external key manager can understand. The proxy configuration includes connection // information that KMS requires. This field appears only when the - // CustomKeyStoreType is EXTERNAL_KEY_STORE. + // CustomKeyStoreType is EXTERNAL_KEY_STORE . XksProxyConfiguration *XksProxyConfigurationType noSmithyDocumentSerde } -// Use this structure to allow cryptographic operations -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) +// Use this structure to allow cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) // in the grant only when the operation request includes the specified encryption -// context -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context). -// KMS applies the grant constraints only to cryptographic operations that support -// an encryption context, that is, all cryptographic operations with a symmetric -// KMS key -// (https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html#symmetric-cmks). -// Grant constraints are not applied to operations that do not support an +// context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) +// . KMS applies the grant constraints only to cryptographic operations that +// support an encryption context, that is, all cryptographic operations with a +// symmetric KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html#symmetric-cmks) +// . Grant constraints are not applied to operations that do not support an // encryption context, such as cryptographic operations with asymmetric KMS keys -// and management operations, such as DescribeKey or RetireGrant. In a +// and management operations, such as DescribeKey or RetireGrant . In a // cryptographic operation, the encryption context in the decryption operation must // be an exact, case-sensitive match for the keys and values in the encryption // context of the encryption operation. Only the order of the pairs can vary. @@ -267,21 +212,18 @@ type CustomKeyStoresListEntry struct { // multiple encryption context pairs that differ only by case. To require a fully // case-sensitive encryption context, use the kms:EncryptionContext: and // kms:EncryptionContextKeys conditions in an IAM or key policy. For details, see -// kms:EncryptionContext: -// (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-encryption-context) +// kms:EncryptionContext: (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-encryption-context) // in the Key Management Service Developer Guide . 
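// A short, illustrative sketch (not part of this package) of filling in the
// GrantConstraints described above: EncryptionContextSubset admits requests
// whose encryption context contains at least the listed pairs, while
// EncryptionContextEquals would require an exact, case-sensitive match. The
// key/value pair below is made up for illustration.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

func main() {
	constraints := types.GrantConstraints{
		// Grant applies only when the request's encryption context includes
		// this pair (it may include additional pairs as well).
		EncryptionContextSubset: map[string]string{
			"Department": "Finance", // hypothetical key/value
		},
	}
	fmt.Printf("%+v\n", constraints)
}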
type GrantConstraints struct { // A list of key-value pairs that must match the encryption context in the - // cryptographic operation - // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) + // cryptographic operation (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) // request. The grant allows the operation only when the encryption context in the // request is the same as the encryption context specified in this constraint. EncryptionContextEquals map[string]string - // A list of key-value pairs that must be included in the encryption context of the - // cryptographic operation - // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) + // A list of key-value pairs that must be included in the encryption context of + // the cryptographic operation (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) // request. The grant allows the cryptographic operation only when the encryption // context in the request includes the key-value pairs specified in this // constraint, although it can include additional key-value pairs. @@ -307,9 +249,8 @@ type GrantListEntry struct { // in the ListGrants response usually contains the user or role designated as the // grantee principal in the grant. However, when the grantee principal in the grant // is an Amazon Web Services service, the GranteePrincipal field contains the - // service principal - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#principal-services), - // which might represent several different grantee principals. + // service principal (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#principal-services) + // , which might represent several different grantee principals. GranteePrincipal *string // The Amazon Web Services account under which the grant was issued. @@ -344,7 +285,7 @@ type KeyListEntry struct { } // Contains metadata about a KMS key. This data type is used as a response element -// for the CreateKey, DescribeKey, and ReplicateKey operations. +// for the CreateKey , DescribeKey , and ReplicateKey operations. type KeyMetadata struct { // The globally unique identifier for the KMS key. @@ -352,28 +293,26 @@ type KeyMetadata struct { // This member is required. KeyId *string - // The twelve-digit account ID of the Amazon Web Services account that owns the KMS - // key. + // The twelve-digit account ID of the Amazon Web Services account that owns the + // KMS key. AWSAccountId *string // The Amazon Resource Name (ARN) of the KMS key. For examples, see Key Management - // Service (KMS) - // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kms) + // Service (KMS) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kms) // in the Example ARNs section of the Amazon Web Services General Reference. Arn *string // The cluster ID of the CloudHSM cluster that contains the key material for the - // KMS key. When you create a KMS key in an CloudHSM custom key store - // (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html), - // KMS creates the key material for the KMS key in the associated CloudHSM cluster. - // This field is present only when the KMS key is created in an CloudHSM key store. + // KMS key. 
When you create a KMS key in an CloudHSM custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) + // , KMS creates the key material for the KMS key in the associated CloudHSM + // cluster. This field is present only when the KMS key is created in an CloudHSM + // key store. CloudHsmClusterId *string // The date and time when the KMS key was created. CreationDate *time.Time - // A unique identifier for the custom key store - // (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) + // A unique identifier for the custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) // that contains the KMS key. This field is present only when the KMS key is // created in a custom key store. CustomKeyStoreId *string @@ -387,32 +326,31 @@ type KeyMetadata struct { // The date and time after which KMS deletes this KMS key. This value is present // only when the KMS key is scheduled for deletion, that is, when its KeyState is - // PendingDeletion. When the primary key in a multi-Region key is scheduled for - // deletion but still has replica keys, its key state is PendingReplicaDeletion and - // the length of its waiting period is displayed in the PendingDeletionWindowInDays - // field. + // PendingDeletion . When the primary key in a multi-Region key is scheduled for + // deletion but still has replica keys, its key state is PendingReplicaDeletion + // and the length of its waiting period is displayed in the + // PendingDeletionWindowInDays field. DeletionDate *time.Time // The description of the KMS key. Description *string - // Specifies whether the KMS key is enabled. When KeyState is Enabled this value is - // true, otherwise it is false. + // Specifies whether the KMS key is enabled. When KeyState is Enabled this value + // is true, otherwise it is false. Enabled bool // The encryption algorithms that the KMS key supports. You cannot use the KMS key // with other encryption algorithms within KMS. This value is present only when the - // KeyUsage of the KMS key is ENCRYPT_DECRYPT. + // KeyUsage of the KMS key is ENCRYPT_DECRYPT . EncryptionAlgorithms []EncryptionAlgorithmSpec - // Specifies whether the KMS key's key material expires. This value is present only - // when Origin is EXTERNAL, otherwise this value is omitted. + // Specifies whether the KMS key's key material expires. This value is present + // only when Origin is EXTERNAL , otherwise this value is omitted. ExpirationModel ExpirationModelType // The manager of the KMS key. KMS keys in your Amazon Web Services account are // either customer managed or Amazon Web Services managed. For more information - // about the difference, see KMS keys - // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#kms_keys) + // about the difference, see KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#kms_keys) // in the Key Management Service Developer Guide. KeyManager KeyManagerType @@ -420,57 +358,49 @@ type KeyMetadata struct { KeySpec KeySpec // The current status of the KMS key. For more information about how key state - // affects the use of a KMS key, see Key states of KMS keys - // (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the - // Key Management Service Developer Guide. 
+ // affects the use of a KMS key, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) + // in the Key Management Service Developer Guide. KeyState KeyState - // The cryptographic operations - // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) + // The cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) // for which you can use the KMS key. KeyUsage KeyUsageType // The message authentication code (MAC) algorithm that the HMAC KMS key supports. // This value is present only when the KeyUsage of the KMS key is - // GENERATE_VERIFY_MAC. + // GENERATE_VERIFY_MAC . MacAlgorithms []MacAlgorithmSpec - // Indicates whether the KMS key is a multi-Region (True) or regional (False) key. - // This value is True for multi-Region primary and replica keys and False for + // Indicates whether the KMS key is a multi-Region ( True ) or regional ( False ) + // key. This value is True for multi-Region primary and replica keys and False for // regional KMS keys. For more information about multi-Region keys, see - // Multi-Region keys in KMS - // (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html) + // Multi-Region keys in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html) // in the Key Management Service Developer Guide. MultiRegion *bool // Lists the primary and replica keys in same multi-Region key. This field is - // present only when the value of the MultiRegion field is True. For more + // present only when the value of the MultiRegion field is True . For more // information about any listed KMS key, use the DescribeKey operation. - // - // * - // MultiRegionKeyType indicates whether the KMS key is a PRIMARY or REPLICA key. - // - // * - // PrimaryKey displays the key ARN and Region of the primary key. This field - // displays the current KMS key if it is the primary key. - // - // * ReplicaKeys displays - // the key ARNs and Regions of all replica keys. This field includes the current - // KMS key if it is a replica key. + // - MultiRegionKeyType indicates whether the KMS key is a PRIMARY or REPLICA + // key. + // - PrimaryKey displays the key ARN and Region of the primary key. This field + // displays the current KMS key if it is the primary key. + // - ReplicaKeys displays the key ARNs and Regions of all replica keys. This + // field includes the current KMS key if it is a replica key. MultiRegionConfiguration *MultiRegionConfiguration - // The source of the key material for the KMS key. When this value is AWS_KMS, KMS - // created the key material. When this value is EXTERNAL, the key material was + // The source of the key material for the KMS key. When this value is AWS_KMS , KMS + // created the key material. When this value is EXTERNAL , the key material was // imported or the KMS key doesn't have any key material. When this value is - // AWS_CLOUDHSM, the key material was created in the CloudHSM cluster associated + // AWS_CLOUDHSM , the key material was created in the CloudHSM cluster associated // with a custom key store. Origin OriginType - // The waiting period before the primary key in a multi-Region key is deleted. This - // waiting period begins when the last of its replica keys is deleted. This value - // is present only when the KeyState of the KMS key is PendingReplicaDeletion. 
That - // indicates that the KMS key is the primary key in a multi-Region key, it is - // scheduled for deletion, and it still has existing replica keys. When a + // The waiting period before the primary key in a multi-Region key is deleted. + // This waiting period begins when the last of its replica keys is deleted. This + // value is present only when the KeyState of the KMS key is PendingReplicaDeletion + // . That indicates that the KMS key is the primary key in a multi-Region key, it + // is scheduled for deletion, and it still has existing replica keys. When a // single-Region KMS key or a multi-Region replica key is scheduled for deletion, // its deletion date is displayed in the DeletionDate field. However, when the // primary key in a multi-Region key is scheduled for deletion, its waiting period @@ -483,18 +413,17 @@ type KeyMetadata struct { // The signing algorithms that the KMS key supports. You cannot use the KMS key // with other signing algorithms within KMS. This field appears only when the - // KeyUsage of the KMS key is SIGN_VERIFY. + // KeyUsage of the KMS key is SIGN_VERIFY . SigningAlgorithms []SigningAlgorithmSpec // The time at which the imported key material expires. When the key material // expires, KMS deletes the key material and the KMS key becomes unusable. This // value is present only for KMS keys whose Origin is EXTERNAL and whose - // ExpirationModel is KEY_MATERIAL_EXPIRES, otherwise this value is omitted. + // ExpirationModel is KEY_MATERIAL_EXPIRES , otherwise this value is omitted. ValidTo *time.Time // Information about the external key that is associated with a KMS key in an - // external key store. For more information, see External key - // (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-external-key) + // external key store. For more information, see External key (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-external-key) // in the Key Management Service Developer Guide. XksKeyConfiguration *XksKeyConfigurationType @@ -533,11 +462,32 @@ type MultiRegionKey struct { noSmithyDocumentSerde } +// Contains information about the party that receives the response from the API +// operation. This data type is designed to support Amazon Web Services Nitro +// Enclaves, which lets you create an isolated compute environment in Amazon EC2. +// For information about the interaction between KMS and Amazon Web Services Nitro +// Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html) +// in the Key Management Service Developer Guide. +type RecipientInfo struct { + + // The attestation document for an Amazon Web Services Nitro Enclave. This + // document includes the enclave's public key. + AttestationDocument []byte + + // The encryption algorithm that KMS should use with the public key for an Amazon + // Web Services Nitro Enclave to encrypt plaintext values for the response. The + // only valid value is RSAES_OAEP_SHA_256 . + KeyEncryptionAlgorithm KeyEncryptionMechanism + + noSmithyDocumentSerde +} + // A key-value pair. A tag consists of a tag key and a tag value. Tag keys and tag -// values are both required, but tag values can be empty (null) strings. 
For -// information about the rules that apply to tag keys and tag values, see -// User-Defined Tag Restrictions -// (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html) +// values are both required, but tag values can be empty (null) strings. Do not +// include confidential or sensitive information in this field. This field may be +// displayed in plaintext in CloudTrail logs and other output. For information +// about the rules that apply to tag keys and tag values, see User-Defined Tag +// Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html) // in the Amazon Web Services Billing and Cost Management User Guide. type Tag struct { @@ -554,16 +504,14 @@ type Tag struct { noSmithyDocumentSerde } -// Information about the external key -// (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-external-key)that -// is associated with a KMS key in an external key store. This element appears in a -// CreateKey or DescribeKey response only for a KMS key in an external key store. -// The external key is a symmetric encryption key that is hosted by an external key -// manager outside of Amazon Web Services. When you use the KMS key in an external -// key store in a cryptographic operation, the cryptographic operation is performed -// in the external key manager using the specified external key. For more -// information, see External key -// (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-external-key) +// Information about the external key (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-external-key) +// that is associated with a KMS key in an external key store. This element appears +// in a CreateKey or DescribeKey response only for a KMS key in an external key +// store. The external key is a symmetric encryption key that is hosted by an +// external key manager outside of Amazon Web Services. When you use the KMS key in +// an external key store in a cryptographic operation, the cryptographic operation +// is performed in the external key manager using the specified external key. For +// more information, see External key (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-external-key) // in the Key Management Service Developer Guide. type XksKeyConfigurationType struct { @@ -598,11 +546,10 @@ type XksProxyAuthenticationCredentialType struct { // external key store proxy translates KMS requests into a format that your // external key manager can understand. These fields appear in a // DescribeCustomKeyStores response only when the CustomKeyStoreType is -// EXTERNAL_KEY_STORE. +// EXTERNAL_KEY_STORE . type XksProxyConfigurationType struct { - // The part of the external key store proxy authentication credential - // (https://docs.aws.amazon.com/kms/latest/APIReference/API_CreateCustomKeyStore.html#KMS-CreateCustomKeyStore-request-XksProxyAuthenticationCredential) + // The part of the external key store proxy authentication credential (https://docs.aws.amazon.com/kms/latest/APIReference/API_CreateCustomKeyStore.html#KMS-CreateCustomKeyStore-request-XksProxyAuthenticationCredential) // that uniquely identifies the secret access key. 
AccessKeyId *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md index 91054696..42fae8ef 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md @@ -1,3 +1,52 @@ +# v1.12.12 (2023-06-15) + +* No change notes available for this release. + +# v1.12.11 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.10 (2023-05-04) + +* No change notes available for this release. + +# v1.12.9 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.8 (2023-04-10) + +* No change notes available for this release. + +# v1.12.7 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.6 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.5 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.4 (2023-02-22) + +* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. + +# v1.12.3 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.2 (2023-02-15) + +* **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. +* **Bug Fix**: Correct error type parsing for restJson services. + +# v1.12.1 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.12.0 (2023-01-05) * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go index 7bb06984..6f30ddc9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go @@ -114,7 +114,7 @@ type Options struct { Retryer aws.Retryer // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set - // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig. You + // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You // should not populate this structure programmatically, or rely on the values here // within your applications. RuntimeEnvironment aws.RuntimeEnvironment diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go index 1c2b7499..3825b9e4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go @@ -10,8 +10,8 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns the STS short-term credentials for a given role name that is assigned to -// the user. +// Returns the STS short-term credentials for a given role name that is assigned +// to the user. 
func (c *Client) GetRoleCredentials(ctx context.Context, params *GetRoleCredentialsInput, optFns ...func(*Options)) (*GetRoleCredentialsOutput, error) { if params == nil { params = &GetRoleCredentialsInput{} @@ -30,8 +30,7 @@ func (c *Client) GetRoleCredentials(ctx context.Context, params *GetRoleCredenti type GetRoleCredentialsInput struct { // The token issued by the CreateToken API call. For more information, see - // CreateToken - // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) // in the IAM Identity Center OIDC API Reference Guide. // // This member is required. @@ -106,6 +105,9 @@ func (c *Client) addOperationGetRoleCredentialsMiddlewares(stack *middleware.Sta if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetRoleCredentials(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go index 4fffc77a..ef3592af 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go @@ -30,8 +30,7 @@ func (c *Client) ListAccountRoles(ctx context.Context, params *ListAccountRolesI type ListAccountRolesInput struct { // The token issued by the CreateToken API call. For more information, see - // CreateToken - // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) // in the IAM Identity Center OIDC API Reference Guide. // // This member is required. @@ -112,6 +111,9 @@ func (c *Client) addOperationListAccountRolesMiddlewares(stack *middleware.Stack if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListAccountRoles(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go index e717a426..cfa8cdc9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go @@ -12,8 +12,7 @@ import ( ) // Lists all AWS accounts assigned to the user. These AWS accounts are assigned by -// the administrator of the account. For more information, see Assign User Access -// (https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers) +// the administrator of the account. For more information, see Assign User Access (https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers) // in the IAM Identity Center User Guide. This operation returns a paginated // response. 
func (c *Client) ListAccounts(ctx context.Context, params *ListAccountsInput, optFns ...func(*Options)) (*ListAccountsOutput, error) { @@ -34,8 +33,7 @@ func (c *Client) ListAccounts(ctx context.Context, params *ListAccountsInput, op type ListAccountsInput struct { // The token issued by the CreateToken API call. For more information, see - // CreateToken - // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) // in the IAM Identity Center OIDC API Reference Guide. // // This member is required. @@ -111,6 +109,9 @@ func (c *Client) addOperationListAccountsMiddlewares(stack *middleware.Stack, op if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListAccounts(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go index 8b9b4474..c13d73f5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go @@ -19,9 +19,8 @@ import ( // temporary AWS credentials are returned to the client. After user logout, any // existing IAM role sessions that were created by using IAM Identity Center // permission sets continue based on the duration configured in the permission set. -// For more information, see User authentications -// (https://docs.aws.amazon.com/singlesignon/latest/userguide/authconcept.html) in -// the IAM Identity Center User Guide. +// For more information, see User authentications (https://docs.aws.amazon.com/singlesignon/latest/userguide/authconcept.html) +// in the IAM Identity Center User Guide. func (c *Client) Logout(ctx context.Context, params *LogoutInput, optFns ...func(*Options)) (*LogoutOutput, error) { if params == nil { params = &LogoutInput{} @@ -40,8 +39,7 @@ func (c *Client) Logout(ctx context.Context, params *LogoutInput, optFns ...func type LogoutInput struct { // The token issued by the CreateToken API call. For more information, see - // CreateToken - // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) // in the IAM Identity Center OIDC API Reference Guide. // // This member is required. 
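The hunks above touch the SSO portal operations (GetRoleCredentials, ListAccountRoles, ListAccounts) and wire recursion detection into each operation's middleware stack. As a rough sketch of how these operations fit together from the caller's side — assuming a pre-existing access token and a hypothetical role name, not anything defined in this patch:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sso"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := sso.NewFromConfig(cfg)

	// Hypothetical token; in practice it comes out of the ssooidc CreateToken flow.
	accessToken := "example-access-token"

	// List the AWS accounts visible to the bearer of this token, then fetch
	// short-term role credentials for each one.
	accounts, err := client.ListAccounts(context.TODO(), &sso.ListAccountsInput{
		AccessToken: aws.String(accessToken),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, acct := range accounts.AccountList {
		creds, err := client.GetRoleCredentials(context.TODO(), &sso.GetRoleCredentialsInput{
			AccessToken: aws.String(accessToken),
			AccountId:   acct.AccountId,
			RoleName:    aws.String("ReadOnly"), // hypothetical role name
		})
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(aws.ToString(acct.AccountId), aws.ToString(creds.RoleCredentials.AccessKeyId))
	}
}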
@@ -102,6 +100,9 @@ func (c *Client) addOperationLogoutMiddlewares(stack *middleware.Stack, options if err = stack.Initialize.Add(newServiceMetadataMiddleware_opLogout(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go index 6a1851da..8bba205f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go @@ -86,9 +86,9 @@ func awsRestjson1_deserializeOpErrorGetRoleCredentials(response *smithyhttp.Resp errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -97,7 +97,7 @@ func awsRestjson1_deserializeOpErrorGetRoleCredentials(response *smithyhttp.Resp body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -109,8 +109,8 @@ func awsRestjson1_deserializeOpErrorGetRoleCredentials(response *smithyhttp.Resp } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -242,9 +242,9 @@ func awsRestjson1_deserializeOpErrorListAccountRoles(response *smithyhttp.Respon errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -253,7 +253,7 @@ func awsRestjson1_deserializeOpErrorListAccountRoles(response *smithyhttp.Respon body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -265,8 +265,8 @@ func awsRestjson1_deserializeOpErrorListAccountRoles(response *smithyhttp.Respon } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -407,9 +407,9 @@ func awsRestjson1_deserializeOpErrorListAccounts(response *smithyhttp.Response, errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -418,7 +418,7 @@ func 
awsRestjson1_deserializeOpErrorListAccounts(response *smithyhttp.Response, body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -430,8 +430,8 @@ func awsRestjson1_deserializeOpErrorListAccounts(response *smithyhttp.Response, } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -550,9 +550,9 @@ func awsRestjson1_deserializeOpErrorLogout(response *smithyhttp.Response, metada errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -561,7 +561,7 @@ func awsRestjson1_deserializeOpErrorLogout(response *smithyhttp.Response, metada body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -573,8 +573,8 @@ func awsRestjson1_deserializeOpErrorLogout(response *smithyhttp.Response, metada } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go index f981b154..59456d5d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go @@ -9,14 +9,13 @@ // and roles assigned to them and get federated into the application. Although AWS // Single Sign-On was renamed, the sso and identitystore API namespaces will // continue to retain their original name for backward compatibility purposes. For -// more information, see IAM Identity Center rename -// (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed). -// This reference guide describes the IAM Identity Center Portal operations that +// more information, see IAM Identity Center rename (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed) +// . This reference guide describes the IAM Identity Center Portal operations that // you can call programatically and includes detailed information on data types and // errors. AWS provides SDKs that consist of libraries and sample code for various // programming languages and platforms, such as Java, Ruby, .Net, iOS, or Android. // The SDKs provide a convenient way to create programmatic access to IAM Identity // Center and other AWS services. For more information about the AWS SDKs, -// including how to download and install them, see Tools for Amazon Web Services -// (http://aws.amazon.com/tools/). +// including how to download and install them, see Tools for Amazon Web Services (http://aws.amazon.com/tools/) +// . 
package sso diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go index 3046500f..b67186c8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go @@ -3,4 +3,4 @@ package sso // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.12.0" +const goModuleVersion = "1.12.12" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go index 90e52137..b1ce03a5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go @@ -89,6 +89,8 @@ var partitionRegexp = struct { AwsCn *regexp.Regexp AwsIso *regexp.Regexp AwsIsoB *regexp.Regexp + AwsIsoE *regexp.Regexp + AwsIsoF *regexp.Regexp AwsUsGov *regexp.Regexp }{ @@ -96,6 +98,8 @@ var partitionRegexp = struct { AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), + AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), + AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"), AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), } @@ -390,6 +394,48 @@ var defaultPartitions = endpoints.Partitions{ RegionRegex: partitionRegexp.AwsIsoB, IsRegionalized: true, }, + { + ID: "aws-iso-e", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "portal.sso-fips.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "portal.sso.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoE, + IsRegionalized: true, + }, + { + ID: "aws-iso-f", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "portal.sso-fips.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "portal.sso.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoF, + IsRegionalized: true, + }, { ID: "aws-us-gov", Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go index 8f0ac3a2..e97a126e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go @@ -27,7 +27,7 @@ func (e *InvalidRequestException) ErrorMessage() string { return *e.Message } func (e *InvalidRequestException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "InvalidRequestException" } return *e.ErrorCodeOverride @@ -53,7 +53,7 @@ func (e *ResourceNotFoundException) ErrorMessage() string { return *e.Message } func (e *ResourceNotFoundException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "ResourceNotFoundException" } return *e.ErrorCodeOverride @@ -80,7 +80,7 @@ 
func (e *TooManyRequestsException) ErrorMessage() string { return *e.Message } func (e *TooManyRequestsException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "TooManyRequestsException" } return *e.ErrorCodeOverride @@ -107,7 +107,7 @@ func (e *UnauthorizedException) ErrorMessage() string { return *e.Message } func (e *UnauthorizedException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "UnauthorizedException" } return *e.ErrorCodeOverride diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go index 051056b7..8dc02296 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go @@ -26,8 +26,7 @@ type RoleCredentials struct { // The identifier used for the temporary security credentials. For more // information, see Using Temporary Security Credentials to Request Access to AWS - // Resources - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) + // Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) // in the AWS IAM User Guide. AccessKeyId *string @@ -35,14 +34,12 @@ type RoleCredentials struct { Expiration int64 // The key that is used to sign the request. For more information, see Using - // Temporary Security Credentials to Request Access to AWS Resources - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) + // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) // in the AWS IAM User Guide. SecretAccessKey *string // The token used for temporary credentials. For more information, see Using - // Temporary Security Credentials to Request Access to AWS Resources - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) + // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) // in the AWS IAM User Guide. SessionToken *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md index 5d6cc157..c6c01750 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md @@ -1,3 +1,52 @@ +# v1.14.12 (2023-06-15) + +* No change notes available for this release. + +# v1.14.11 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.10 (2023-05-04) + +* No change notes available for this release. + +# v1.14.9 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.8 (2023-04-10) + +* No change notes available for this release. + +# v1.14.7 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.6 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.5 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.4 (2023-02-22) + +* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. 
+ +# v1.14.3 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.2 (2023-02-15) + +* **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. +* **Bug Fix**: Correct error type parsing for restJson services. + +# v1.14.1 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.14.0 (2023-01-05) * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go index 5e0a85a2..111f66d3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go @@ -114,7 +114,7 @@ type Options struct { Retryer aws.Retryer // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set - // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig. You + // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You // should not populate this structure programmatically, or rely on the values here // within your applications. RuntimeEnvironment aws.RuntimeEnvironment diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go index cde97b4f..5d7bd3c1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go @@ -43,7 +43,7 @@ type CreateTokenInput struct { // Supports grant types for the authorization code, refresh token, and device code // request. For device code requests, specify the following value: - // urn:ietf:params:oauth:grant-type:device_code For information about how to + // urn:ietf:params:oauth:grant-type:device_code For information about how to // obtain the device code, see the StartDeviceAuthorization topic. // // This member is required. @@ -65,9 +65,8 @@ type CreateTokenInput struct { // Currently, refreshToken is not yet implemented and is not supported. For more // information about the features and limitations of the current IAM Identity // Center OIDC implementation, see Considerations for Using this Guide in the IAM - // Identity Center OIDC API Reference - // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). - // The token used to obtain an access token in the event that the access token is + // Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) + // . The token used to obtain an access token in the event that the access token is // invalid or expired. RefreshToken *string @@ -89,22 +88,20 @@ type CreateTokenOutput struct { // Currently, idToken is not yet implemented and is not supported. For more // information about the features and limitations of the current IAM Identity // Center OIDC implementation, see Considerations for Using this Guide in the IAM - // Identity Center OIDC API Reference - // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). - // The identifier of the user that associated with the access token, if present. 
+ // Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) + // . The identifier of the user that associated with the access token, if present. IdToken *string // Currently, refreshToken is not yet implemented and is not supported. For more // information about the features and limitations of the current IAM Identity // Center OIDC implementation, see Considerations for Using this Guide in the IAM - // Identity Center OIDC API Reference - // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). - // A token that, if present, can be used to refresh a previously issued access + // Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) + // . A token that, if present, can be used to refresh a previously issued access // token that might have expired. RefreshToken *string // Used to notify the client that the returned token is an access token. The - // supported type is BearerToken. + // supported type is BearerToken . TokenType *string // Metadata pertaining to the operation's result. @@ -158,6 +155,9 @@ func (c *Client) addOperationCreateTokenMiddlewares(stack *middleware.Stack, opt if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateToken(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go index 3ed8cc35..1d83b226 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go @@ -40,8 +40,8 @@ type RegisterClientInput struct { // This member is required. ClientType *string - // The list of scopes that are defined by the client. Upon authorization, this list - // is used to restrict permissions when granting an access token. + // The list of scopes that are defined by the client. Upon authorization, this + // list is used to restrict permissions when granting an access token. Scopes []string noSmithyDocumentSerde @@ -59,8 +59,8 @@ type RegisterClientOutput struct { // Indicates the time at which the clientId and clientSecret were issued. ClientIdIssuedAt int64 - // A secret string generated for the client. The client will use this string to get - // authenticated by the service in subsequent calls. + // A secret string generated for the client. The client will use this string to + // get authenticated by the service in subsequent calls. ClientSecret *string // Indicates the time at which the clientId and clientSecret will become invalid. 
@@ -120,6 +120,9 @@ func (c *Client) addOperationRegisterClientMiddlewares(stack *middleware.Stack, if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRegisterClient(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go index 013ccbc9..70e60db1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go @@ -28,9 +28,9 @@ func (c *Client) StartDeviceAuthorization(ctx context.Context, params *StartDevi type StartDeviceAuthorizationInput struct { - // The unique identifier string for the client that is registered with IAM Identity - // Center. This value should come from the persisted result of the RegisterClient - // API operation. + // The unique identifier string for the client that is registered with IAM + // Identity Center. This value should come from the persisted result of the + // RegisterClient API operation. // // This member is required. ClientId *string @@ -42,8 +42,7 @@ type StartDeviceAuthorizationInput struct { ClientSecret *string // The URL for the AWS access portal. For more information, see Using the AWS - // access portal - // (https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html) + // access portal (https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html) // in the IAM Identity Center User Guide. // // This member is required. @@ -73,9 +72,9 @@ type StartDeviceAuthorizationOutput struct { // device. VerificationUri *string - // An alternate URL that the client can use to automatically launch a browser. This - // process skips the manual step in which the user visits the verification page and - // enters their code. + // An alternate URL that the client can use to automatically launch a browser. + // This process skips the manual step in which the user visits the verification + // page and enters their code. VerificationUriComplete *string // Metadata pertaining to the operation's result. 
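RegisterClient, StartDeviceAuthorization, and CreateToken above make up the device authorization grant that the ssooidc package documentation further below describes. A minimal sketch of that flow, assuming a hypothetical client name and access portal URL and glossing over the polling loop that waits for the user to approve the request:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := ssooidc.NewFromConfig(cfg)

	// 1. Register a public client with IAM Identity Center.
	reg, err := client.RegisterClient(context.TODO(), &ssooidc.RegisterClientInput{
		ClientName: aws.String("example-cli"), // hypothetical client name
		ClientType: aws.String("public"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// 2. Start the device authorization flow against the AWS access portal.
	auth, err := client.StartDeviceAuthorization(context.TODO(), &ssooidc.StartDeviceAuthorizationInput{
		ClientId:     reg.ClientId,
		ClientSecret: reg.ClientSecret,
		StartUrl:     aws.String("https://example.awsapps.com/start"), // hypothetical portal URL
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("visit:", aws.ToString(auth.VerificationUriComplete))

	// 3. Once the user has approved the request in the browser, exchange the
	//    device code for an access token.
	tok, err := client.CreateToken(context.TODO(), &ssooidc.CreateTokenInput{
		ClientId:     reg.ClientId,
		ClientSecret: reg.ClientSecret,
		DeviceCode:   auth.DeviceCode,
		GrantType:    aws.String("urn:ietf:params:oauth:grant-type:device_code"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("token type:", aws.ToString(tok.TokenType))
}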
@@ -129,6 +128,9 @@ func (c *Client) addOperationStartDeviceAuthorizationMiddlewares(stack *middlewa if err = stack.Initialize.Add(newServiceMetadataMiddleware_opStartDeviceAuthorization(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go index e9939aff..ca30d22f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go @@ -85,9 +85,9 @@ func awsRestjson1_deserializeOpErrorCreateToken(response *smithyhttp.Response, m errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -96,7 +96,7 @@ func awsRestjson1_deserializeOpErrorCreateToken(response *smithyhttp.Response, m body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -108,8 +108,8 @@ func awsRestjson1_deserializeOpErrorCreateToken(response *smithyhttp.Response, m } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -306,9 +306,9 @@ func awsRestjson1_deserializeOpErrorRegisterClient(response *smithyhttp.Response errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -317,7 +317,7 @@ func awsRestjson1_deserializeOpErrorRegisterClient(response *smithyhttp.Response body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -329,8 +329,8 @@ func awsRestjson1_deserializeOpErrorRegisterClient(response *smithyhttp.Response } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message @@ -519,9 +519,9 @@ func awsRestjson1_deserializeOpErrorStartDeviceAuthorization(response *smithyhtt errorCode := "UnknownError" errorMessage := errorCode - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) } var buff [1024]byte @@ -530,7 +530,7 @@ func 
awsRestjson1_deserializeOpErrorStartDeviceAuthorization(response *smithyhtt body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -542,8 +542,8 @@ func awsRestjson1_deserializeOpErrorStartDeviceAuthorization(response *smithyhtt } errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) } if len(message) != 0 { errorMessage = message diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go index a025f732..2239427d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go @@ -1,7 +1,7 @@ // Code generated by smithy-go-codegen DO NOT EDIT. -// Package ssooidc provides the API client, operations, and parameter types for AWS -// SSO OIDC. +// Package ssooidc provides the API client, operations, and parameter types for +// AWS SSO OIDC. // // AWS IAM Identity Center (successor to AWS Single Sign-On) OpenID Connect (OIDC) // is a web service that enables a client (such as AWS CLI or a native application) @@ -10,37 +10,27 @@ // with IAM Identity Center. Although AWS Single Sign-On was renamed, the sso and // identitystore API namespaces will continue to retain their original name for // backward compatibility purposes. For more information, see IAM Identity Center -// rename -// (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed). -// Considerations for Using This Guide Before you begin using this guide, we +// rename (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed) +// . Considerations for Using This Guide Before you begin using this guide, we // recommend that you first review the following important information about how // the IAM Identity Center OIDC service works. +// - The IAM Identity Center OIDC service currently implements only the portions +// of the OAuth 2.0 Device Authorization Grant standard ( +// https://tools.ietf.org/html/rfc8628 (https://tools.ietf.org/html/rfc8628) ) +// that are necessary to enable single sign-on authentication with the AWS CLI. +// Support for other OIDC flows frequently needed for native applications, such as +// Authorization Code Flow (+ PKCE), will be addressed in future releases. +// - The service emits only OIDC access tokens, such that obtaining a new token +// (For example, token refresh) requires explicit user re-authentication. +// - The access tokens provided by this service grant access to all AWS account +// entitlements assigned to an IAM Identity Center user, not just a particular +// application. +// - The documentation in this guide does not describe the mechanism to convert +// the access token into AWS Auth (“sigv4”) credentials for use with IAM-protected +// AWS service endpoints. For more information, see GetRoleCredentials (https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html) +// in the IAM Identity Center Portal API Reference Guide. 
// -// * The IAM Identity Center OIDC -// service currently implements only the portions of the OAuth 2.0 Device -// Authorization Grant standard (https://tools.ietf.org/html/rfc8628 -// (https://tools.ietf.org/html/rfc8628)) that are necessary to enable single -// sign-on authentication with the AWS CLI. Support for other OIDC flows frequently -// needed for native applications, such as Authorization Code Flow (+ PKCE), will -// be addressed in future releases. -// -// * The service emits only OIDC access tokens, -// such that obtaining a new token (For example, token refresh) requires explicit -// user re-authentication. -// -// * The access tokens provided by this service grant -// access to all AWS account entitlements assigned to an IAM Identity Center user, -// not just a particular application. -// -// * The documentation in this guide does not -// describe the mechanism to convert the access token into AWS Auth (“sigv4”) -// credentials for use with IAM-protected AWS service endpoints. For more -// information, see GetRoleCredentials -// (https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html) -// in the IAM Identity Center Portal API Reference Guide. -// -// For general information -// about IAM Identity Center, see What is IAM Identity Center? -// (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html) in the -// IAM Identity Center User Guide. +// For general information about IAM Identity Center, see What is IAM Identity +// Center? (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html) +// in the IAM Identity Center User Guide. package ssooidc diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go index 6e76b14b..82156c20 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go @@ -3,4 +3,4 @@ package ssooidc // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.14.0" +const goModuleVersion = "1.14.12" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go index 2212db1c..b04fb46f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go @@ -89,6 +89,8 @@ var partitionRegexp = struct { AwsCn *regexp.Regexp AwsIso *regexp.Regexp AwsIsoB *regexp.Regexp + AwsIsoE *regexp.Regexp + AwsIsoF *regexp.Regexp AwsUsGov *regexp.Regexp }{ @@ -96,6 +98,8 @@ var partitionRegexp = struct { AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), + AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), + AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"), AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), } @@ -390,6 +394,48 @@ var defaultPartitions = endpoints.Partitions{ RegionRegex: partitionRegexp.AwsIsoB, IsRegionalized: true, }, + { + ID: "aws-iso-e", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "oidc-fips.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + 
Hostname: "oidc.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoE, + IsRegionalized: true, + }, + { + ID: "aws-iso-f", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "oidc-fips.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "oidc.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoF, + IsRegionalized: true, + }, { ID: "aws-us-gov", Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go index b75cc489..115a51a9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go @@ -29,15 +29,15 @@ func (e *AccessDeniedException) ErrorMessage() string { return *e.Message } func (e *AccessDeniedException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "AccessDeniedException" } return *e.ErrorCodeOverride } func (e *AccessDeniedException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// Indicates that a request to authorize a client with an access user session token -// is pending. +// Indicates that a request to authorize a client with an access user session +// token is pending. type AuthorizationPendingException struct { Message *string @@ -59,7 +59,7 @@ func (e *AuthorizationPendingException) ErrorMessage() string { return *e.Message } func (e *AuthorizationPendingException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "AuthorizationPendingException" } return *e.ErrorCodeOverride @@ -89,7 +89,7 @@ func (e *ExpiredTokenException) ErrorMessage() string { return *e.Message } func (e *ExpiredTokenException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "ExpiredTokenException" } return *e.ErrorCodeOverride @@ -119,7 +119,7 @@ func (e *InternalServerException) ErrorMessage() string { return *e.Message } func (e *InternalServerException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "InternalServerException" } return *e.ErrorCodeOverride @@ -128,7 +128,7 @@ func (e *InternalServerException) ErrorFault() smithy.ErrorFault { return smithy // Indicates that the clientId or clientSecret in the request is invalid. For // example, this can occur when a client sends an incorrect clientId or an expired -// clientSecret. +// clientSecret . type InvalidClientException struct { Message *string @@ -150,15 +150,15 @@ func (e *InvalidClientException) ErrorMessage() string { return *e.Message } func (e *InvalidClientException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "InvalidClientException" } return *e.ErrorCodeOverride } func (e *InvalidClientException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// Indicates that the client information sent in the request during registration is -// invalid. +// Indicates that the client information sent in the request during registration +// is invalid. 
type InvalidClientMetadataException struct { Message *string @@ -180,7 +180,7 @@ func (e *InvalidClientMetadataException) ErrorMessage() string { return *e.Message } func (e *InvalidClientMetadataException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "InvalidClientMetadataException" } return *e.ErrorCodeOverride @@ -210,7 +210,7 @@ func (e *InvalidGrantException) ErrorMessage() string { return *e.Message } func (e *InvalidGrantException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "InvalidGrantException" } return *e.ErrorCodeOverride @@ -240,7 +240,7 @@ func (e *InvalidRequestException) ErrorMessage() string { return *e.Message } func (e *InvalidRequestException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "InvalidRequestException" } return *e.ErrorCodeOverride @@ -269,7 +269,7 @@ func (e *InvalidScopeException) ErrorMessage() string { return *e.Message } func (e *InvalidScopeException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "InvalidScopeException" } return *e.ErrorCodeOverride @@ -299,7 +299,7 @@ func (e *SlowDownException) ErrorMessage() string { return *e.Message } func (e *SlowDownException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "SlowDownException" } return *e.ErrorCodeOverride @@ -329,7 +329,7 @@ func (e *UnauthorizedClientException) ErrorMessage() string { return *e.Message } func (e *UnauthorizedClientException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "UnauthorizedClientException" } return *e.ErrorCodeOverride @@ -358,7 +358,7 @@ func (e *UnsupportedGrantTypeException) ErrorMessage() string { return *e.Message } func (e *UnsupportedGrantTypeException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "UnsupportedGrantTypeException" } return *e.ErrorCodeOverride diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md index e624d601..46a30e5b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md @@ -1,3 +1,60 @@ +# v1.19.2 (2023-06-15) + +* No change notes available for this release. + +# v1.19.1 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.19.0 (2023-05-08) + +* **Feature**: Documentation updates for AWS Security Token Service. + +# v1.18.11 (2023-05-04) + +* No change notes available for this release. + +# v1.18.10 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.9 (2023-04-10) + +* No change notes available for this release. + +# v1.18.8 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.7 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.6 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.5 (2023-02-22) + +* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. 
+ +# v1.18.4 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.3 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. + +# v1.18.2 (2023-01-25) + +* **Documentation**: Doc only change to update wording in a key topic + +# v1.18.1 (2023-01-23) + +* No change notes available for this release. + # v1.18.0 (2023-01-05) * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go index 3041fc46..78eb2670 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go @@ -117,7 +117,7 @@ type Options struct { Retryer aws.Retryer // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set - // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig. You + // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You // should not populate this structure programmatically, or rely on the values here // within your applications. RuntimeEnvironment aws.RuntimeEnvironment diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go index f4f4f46f..db22efc0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go @@ -12,21 +12,17 @@ import ( ) // Returns a set of temporary security credentials that you can use to access -// Amazon Web Services resources that you might not normally have access to. These -// temporary credentials consist of an access key ID, a secret access key, and a -// security token. Typically, you use AssumeRole within your account or for -// cross-account access. For a comparison of AssumeRole with other API operations -// that produce temporary credentials, see Requesting Temporary Security -// Credentials -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// Amazon Web Services resources. These temporary credentials consist of an access +// key ID, a secret access key, and a security token. Typically, you use AssumeRole +// within your account or for cross-account access. For a comparison of AssumeRole +// with other API operations that produce temporary credentials, see Requesting +// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. Permissions The temporary security credentials created by -// AssumeRole can be used to make API calls to any Amazon Web Services service with -// the following exception: You cannot call the Amazon Web Services STS +// AssumeRole can be used to make API calls to any Amazon Web Services service +// with the following exception: You cannot call the Amazon Web Services STS // GetFederationToken or GetSessionToken API operations. 
(Optional) You can pass -// inline or managed session policies -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an // inline session policy. You can also specify up to 10 managed policy Amazon // Resource Names (ARNs) to use as managed session policies. The plaintext that you @@ -37,49 +33,39 @@ import ( // credentials in subsequent Amazon Web Services API calls to access resources in // the account that owns the role. You cannot use session policies to grant more // permissions than those allowed by the identity-based policy of the role that is -// being assumed. For more information, see Session Policies -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. When you create a role, you create two policies: A role -// trust policy that specifies who can assume the role and a permissions policy +// being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. When you create a role, you create two policies: a role +// trust policy that specifies who can assume the role, and a permissions policy // that specifies what can be done with the role. You specify the trusted principal -// who is allowed to assume the role in the role trust policy. To assume a role +// that is allowed to assume the role in the role trust policy. To assume a role // from a different account, your Amazon Web Services account must be trusted by // the role. The trust relationship is defined in the role's trust policy when the // role is created. That trust policy states which accounts are allowed to delegate // that access to users in the account. A user who wants to access a role in a -// different account must also have permissions that are delegated from the user -// account administrator. The administrator must attach a policy that allows the -// user to call AssumeRole for the ARN of the role in the other account. To allow a -// user to assume a role in the same account, you can do either of the -// following: +// different account must also have permissions that are delegated from the account +// administrator. The administrator must attach a policy that allows the user to +// call AssumeRole for the ARN of the role in the other account. To allow a user +// to assume a role in the same account, you can do either of the following: +// - Attach a policy to the user that allows the user to call AssumeRole (as long +// as the role's trust policy trusts the account). +// - Add the user as a principal directly in the role's trust policy. // -// * Attach a policy to the user that allows the user to call -// AssumeRole (as long as the role's trust policy trusts the account). -// -// * Add the -// user as a principal directly in the role's trust policy. -// -// You can do either -// because the role’s trust policy acts as an IAM resource-based policy. When a -// resource-based policy grants access to a principal in the same account, no -// additional identity-based policy is required. For more information about trust -// policies and resource-based policies, see IAM Policies -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) in the -// IAM User Guide. 
Tags (Optional) You can pass tag key-value pairs to your +// You can do either because the role’s trust policy acts as an IAM resource-based +// policy. When a resource-based policy grants access to a principal in the same +// account, no additional identity-based policy is required. For more information +// about trust policies and resource-based policies, see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) +// in the IAM User Guide. Tags (Optional) You can pass tag key-value pairs to your // session. These tags are called session tags. For more information about session -// tags, see Passing Session Tags in STS -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the -// IAM User Guide. An administrator must grant you the permissions necessary to -// pass session tags. The administrator can also create granular permissions to +// tags, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. An administrator must grant you the permissions necessary +// to pass session tags. The administrator can also create granular permissions to // allow you to pass only specific session tags. For more information, see -// Tutorial: Using Tags for Attribute-Based Access Control -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) // in the IAM User Guide. You can set the session tags as transitive. Transitive -// tags persist during role chaining. For more information, see Chaining Roles with -// Session Tags -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// tags persist during role chaining. For more information, see Chaining Roles +// with Session Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) // in the IAM User Guide. Using MFA with AssumeRole (Optional) You can include -// multi-factor authentication (MFA) information when you call AssumeRole. This is +// multi-factor authentication (MFA) information when you call AssumeRole . This is // useful for cross-account scenarios to ensure that the user that assumes the role // has been authenticated with an Amazon Web Services MFA device. In that scenario, // the trust policy of the role being assumed includes a condition that tests for @@ -87,12 +73,11 @@ import ( // request to assume the role is denied. The condition in a trust policy that tests // for MFA authentication might look like the following example. "Condition": // {"Bool": {"aws:MultiFactorAuthPresent": true}} For more information, see -// Configuring MFA-Protected API Access -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) in the -// IAM User Guide guide. To use MFA with AssumeRole, you pass values for the -// SerialNumber and TokenCode parameters. The SerialNumber value identifies the -// user's hardware or virtual MFA device. The TokenCode is the time-based one-time -// password (TOTP) that the MFA device produces. +// Configuring MFA-Protected API Access (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) +// in the IAM User Guide guide. To use MFA with AssumeRole , you pass values for +// the SerialNumber and TokenCode parameters. 
The SerialNumber value identifies +// the user's hardware or virtual MFA device. The TokenCode is the time-based +// one-time password (TOTP) that the MFA device produces. func (c *Client) AssumeRole(ctx context.Context, params *AssumeRoleInput, optFns ...func(*Options)) (*AssumeRoleOutput, error) { if params == nil { params = &AssumeRoleInput{} @@ -144,16 +129,14 @@ type AssumeRoleInput struct { // maximum session duration setting for your role. However, if you assume a role // using role chaining and provide a DurationSeconds parameter value greater than // one hour, the operation fails. To learn how to view the maximum value for your - // role, see View the Maximum Session Duration Setting for a Role - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // role, see View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) // in the IAM User Guide. By default, the value is set to 3600 seconds. The // DurationSeconds parameter is separate from the duration of a console session // that you might request using the returned credentials. The request to the // federation endpoint for a console sign-in token takes a SessionDuration // parameter that specifies the maximum length of the console session. For more // information, see Creating a URL that Enables Federated Users to Access the - // Amazon Web Services Management Console - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. DurationSeconds *int32 @@ -166,8 +149,7 @@ type AssumeRoleInput struct { // administrator of the trusted account. That way, only someone with the ID can // assume the role, rather than everyone in the account. For more information about // the external ID, see How to Use an External ID When Granting Access to Your - // Amazon Web Services Resources to a Third Party - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) + // Amazon Web Services Resources to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) // in the IAM User Guide. The regex used to validate this parameter is a string of // characters consisting of upper- and lower-case alphanumeric characters with no // spaces. You can also include underscores or any of the following characters: @@ -182,8 +164,7 @@ type AssumeRoleInput struct { // access resources in the account that owns the role. You cannot use session // policies to grant more permissions than those allowed by the identity-based // policy of the role that is being assumed. For more information, see Session - // Policies - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. The plaintext that you use for both inline and managed // session policies can't exceed 2,048 characters. The JSON policy characters can // be any ASCII character from the space character to the end of the valid @@ -201,9 +182,8 @@ type AssumeRoleInput struct { // the role. This parameter is optional. You can provide up to 10 managed policy // ARNs. 
However, the plaintext that you use for both inline and managed session // policies can't exceed 2,048 characters. For more information about ARNs, see - // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces - // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) in - // the Amazon Web Services General Reference. An Amazon Web Services conversion + // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. An Amazon Web Services conversion // compresses the passed inline session policy, managed policy ARNs, and session // tags into a packed binary format that has a separate limit. Your request can // fail for this limit even if your plaintext meets the other requirements. The @@ -215,17 +195,16 @@ type AssumeRoleInput struct { // Services API calls to access resources in the account that owns the role. You // cannot use session policies to grant more permissions than those allowed by the // identity-based policy of the role that is being assumed. For more information, - // see Session Policies - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. PolicyArns []types.PolicyDescriptorType - // The identification number of the MFA device that is associated with the user who - // is making the AssumeRole call. Specify this value if the trust policy of the - // role being assumed includes a condition that requires MFA authentication. The - // value is either the serial number for a hardware device (such as GAHT12345678) - // or an Amazon Resource Name (ARN) for a virtual device (such as - // arn:aws:iam::123456789012:mfa/user). The regex used to validate this parameter + // The identification number of the MFA device that is associated with the user + // who is making the AssumeRole call. Specify this value if the trust policy of + // the role being assumed includes a condition that requires MFA authentication. + // The value is either the serial number for a hardware device (such as + // GAHT12345678 ) or an Amazon Resource Name (ARN) for a virtual device (such as + // arn:aws:iam::123456789012:mfa/user ). The regex used to validate this parameter // is a string of characters consisting of upper- and lower-case alphanumeric // characters with no spaces. You can also include underscores or any of the // following characters: =,.@- @@ -238,24 +217,21 @@ type AssumeRoleInput struct { // who took actions with a role. You can use the aws:SourceIdentity condition key // to further control access to Amazon Web Services resources based on the value of // source identity. For more information about using source identity, see Monitor - // and control actions taken with assumed roles - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) // in the IAM User Guide. The regex used to validate this parameter is a string of // characters consisting of upper- and lower-case alphanumeric characters with no // spaces. You can also include underscores or any of the following characters: - // =,.@-. You cannot use a value that begins with the text aws:. 
This prefix is + // =,.@-. You cannot use a value that begins with the text aws: . This prefix is // reserved for Amazon Web Services internal use. SourceIdentity *string - // A list of session tags that you want to pass. Each session tag consists of a key - // name and an associated value. For more information about session tags, see - // Tagging Amazon Web Services STS Sessions - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the - // IAM User Guide. This parameter is optional. You can pass up to 50 session tags. - // The plaintext session tag keys can’t exceed 128 characters, and the values can’t - // exceed 256 characters. For these and additional limits, see IAM and STS - // Character Limits - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // A list of session tags that you want to pass. Each session tag consists of a + // key name and an associated value. For more information about session tags, see + // Tagging Amazon Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. This parameter is optional. You can pass up to 50 session + // tags. The plaintext session tag keys can’t exceed 128 characters, and the values + // can’t exceed 256 characters. For these and additional limits, see IAM and STS + // Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. An Amazon Web Services conversion compresses the passed // inline session policy, managed policy ARNs, and session tags into a packed // binary format that has a separate limit. Your request can fail for this limit @@ -265,16 +241,15 @@ type AssumeRoleInput struct { // same key as a tag that is already attached to the role. When you do, session // tags override a role tag with the same key. Tag key–value pairs are not case // sensitive, but case is preserved. This means that you cannot have separate - // Department and department tag keys. Assume that the role has the - // Department=Marketing tag and you pass the department=engineering session tag. - // Department and department are not saved as separate tags, and the session tag - // passed in the request takes precedence over the role tag. Additionally, if you - // used temporary credentials to perform this operation, the new session inherits - // any transitive session tags from the calling session. If you pass a session tag - // with the same key as an inherited tag, the operation fails. To view the - // inherited tags for a session, see the CloudTrail logs. For more information, see - // Viewing Session Tags in CloudTrail - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_ctlogs) + // Department and department tag keys. Assume that the role has the Department = + // Marketing tag and you pass the department = engineering session tag. Department + // and department are not saved as separate tags, and the session tag passed in + // the request takes precedence over the role tag. Additionally, if you used + // temporary credentials to perform this operation, the new session inherits any + // transitive session tags from the calling session. If you pass a session tag with + // the same key as an inherited tag, the operation fails. To view the inherited + // tags for a session, see the CloudTrail logs. 
For more information, see Viewing + // Session Tags in CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_ctlogs) // in the IAM User Guide. Tags []types.Tag @@ -286,11 +261,10 @@ type AssumeRoleInput struct { // sequence of six numeric digits. TokenCode *string - // A list of keys for session tags that you want to set as transitive. If you set a - // tag key as transitive, the corresponding key and value passes to subsequent + // A list of keys for session tags that you want to set as transitive. If you set + // a tag key as transitive, the corresponding key and value passes to subsequent // sessions in a role chain. For more information, see Chaining Roles with Session - // Tags - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) + // Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) // in the IAM User Guide. This parameter is optional. When you set session tags as // transitive, the session policy and session tags packed binary limit is not // affected. If you choose not to specify a transitive tag key, then no tags are @@ -309,7 +283,7 @@ type AssumeRoleOutput struct { // that you can use to refer to the resulting temporary security credentials. For // example, you can reference these credentials as a principal in a resource-based // policy by using the ARN or assumed role ID. The ARN and ID include the - // RoleSessionName that you specified when you called AssumeRole. + // RoleSessionName that you specified when you called AssumeRole . AssumedRoleUser *types.AssumedRoleUser // The temporary security credentials, which include an access key ID, a secret @@ -331,8 +305,7 @@ type AssumeRoleOutput struct { // who took actions with a role. You can use the aws:SourceIdentity condition key // to further control access to Amazon Web Services resources based on the value of // source identity. For more information about using source identity, see Monitor - // and control actions taken with assumed roles - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) // in the IAM User Guide. The regex used to validate this parameter is a string of // characters consisting of upper- and lower-case alphanumeric characters with no // spaces. You can also include underscores or any of the following characters: @@ -396,6 +369,9 @@ func (c *Client) addOperationAssumeRoleMiddlewares(stack *middleware.Stack, opti if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRole(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go index 4ed0f5d0..65dccc9e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go @@ -15,10 +15,8 @@ import ( // mechanism for tying an enterprise identity store or directory to role-based // Amazon Web Services access without user-specific credentials or configuration. 
// For a comparison of AssumeRoleWithSAML with the other API operations that -// produce temporary credentials, see Requesting Temporary Security Credentials -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// produce temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. The temporary security credentials returned by this // operation consist of an access key ID, a secret access key, and a security // token. Applications can use these temporary security credentials to sign calls @@ -31,15 +29,12 @@ import ( // DurationSeconds value from 900 seconds (15 minutes) up to the maximum session // duration setting for the role. This setting can have a value from 1 hour to 12 // hours. To learn how to view the maximum value for your role, see View the -// Maximum Session Duration Setting for a Role -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) // in the IAM User Guide. The maximum session duration limit applies when you use // the AssumeRole* API operations or the assume-role* CLI commands. However the // limit does not apply when you use those operations to create a console URL. For -// more information, see Using IAM Roles -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) in the IAM -// User Guide. Role chaining -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining) +// more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. Role chaining (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining) // limits your CLI or Amazon Web Services API role session to a maximum of one // hour. When you use the AssumeRole API operation to assume a role, you can // specify the duration of your role session with the DurationSeconds parameter. @@ -50,8 +45,7 @@ import ( // credentials created by AssumeRoleWithSAML can be used to make API calls to any // Amazon Web Services service with the following exception: you cannot call the // STS GetFederationToken or GetSessionToken API operations. (Optional) You can -// pass inline or managed session policies -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an // inline session policy. You can also specify up to 10 managed policy Amazon // Resource Names (ARNs) to use as managed session policies. The plaintext that you @@ -62,8 +56,7 @@ import ( // credentials in subsequent Amazon Web Services API calls to access resources in // the account that owns the role. 
You cannot use session policies to grant more // permissions than those allowed by the identity-based policy of the role that is -// being assumed. For more information, see Session Policies -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. Calling AssumeRoleWithSAML does not require the use of // Amazon Web Services security credentials. The identity of the caller is // validated by using keys in the metadata document that is uploaded for the SAML @@ -71,16 +64,14 @@ import ( // result in an entry in your CloudTrail logs. The entry includes the value in the // NameID element of the SAML assertion. We recommend that you use a NameIDType // that is not associated with any personally identifiable information (PII). For -// example, you could instead use the persistent identifier -// (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). Tags (Optional) You can +// example, you could instead use the persistent identifier ( +// urn:oasis:names:tc:SAML:2.0:nameid-format:persistent ). Tags (Optional) You can // configure your IdP to pass attributes into your SAML assertion as session tags. // Each session tag consists of a key name and an associated value. For more -// information about session tags, see Passing Session Tags in STS -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the -// IAM User Guide. You can pass up to 50 session tags. The plaintext session tag -// keys can’t exceed 128 characters and the values can’t exceed 256 characters. For -// these and additional limits, see IAM and STS Character Limits -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) +// information about session tags, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. You can pass up to 50 session tags. The plaintext session +// tag keys can’t exceed 128 characters and the values can’t exceed 256 characters. +// For these and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. An Amazon Web Services conversion compresses the passed // inline session policy, managed policy ARNs, and session tags into a packed // binary format that has a separate limit. Your request can fail for this limit @@ -91,36 +82,25 @@ import ( // override the role's tags with the same key. An administrator must grant you the // permissions necessary to pass session tags. The administrator can also create // granular permissions to allow you to pass only specific session tags. For more -// information, see Tutorial: Using Tags for Attribute-Based Access Control -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// information, see Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) // in the IAM User Guide. You can set the session tags as transitive. Transitive -// tags persist during role chaining. 
For more information, see Chaining Roles with -// Session Tags -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// tags persist during role chaining. For more information, see Chaining Roles +// with Session Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) // in the IAM User Guide. SAML Configuration Before your application can call -// AssumeRoleWithSAML, you must configure your SAML identity provider (IdP) to +// AssumeRoleWithSAML , you must configure your SAML identity provider (IdP) to // issue the claims required by Amazon Web Services. Additionally, you must use // Identity and Access Management (IAM) to create a SAML provider entity in your // Amazon Web Services account that represents your identity provider. You must // also create an IAM role that specifies this SAML provider in its trust policy. // For more information, see the following resources: -// -// * About SAML 2.0-based -// Federation -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) -// in the IAM User Guide. -// -// * Creating SAML Identity Providers -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) -// in the IAM User Guide. -// -// * Configuring a Relying Party and Claims -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) -// in the IAM User Guide. -// -// * Creating a Role for SAML 2.0 Federation -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) -// in the IAM User Guide. +// - About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) +// in the IAM User Guide. +// - Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) +// in the IAM User Guide. +// - Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) +// in the IAM User Guide. +// - Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) +// in the IAM User Guide. func (c *Client) AssumeRoleWithSAML(ctx context.Context, params *AssumeRoleWithSAMLInput, optFns ...func(*Options)) (*AssumeRoleWithSAMLOutput, error) { if params == nil { params = &AssumeRoleWithSAMLInput{} @@ -150,8 +130,7 @@ type AssumeRoleWithSAMLInput struct { RoleArn *string // The base64 encoded SAML authentication response provided by the IdP. For more - // information, see Configuring a Relying Party and Adding Claims - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) + // information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) // in the IAM User Guide. // // This member is required. @@ -166,16 +145,14 @@ type AssumeRoleWithSAMLInput struct { // than this setting, the operation fails. For example, if you specify a session // duration of 12 hours, but your administrator set the maximum session duration to // 6 hours, your operation fails. 
To learn how to view the maximum value for your - // role, see View the Maximum Session Duration Setting for a Role - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // role, see View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) // in the IAM User Guide. By default, the value is set to 3600 seconds. The // DurationSeconds parameter is separate from the duration of a console session // that you might request using the returned credentials. The request to the // federation endpoint for a console sign-in token takes a SessionDuration // parameter that specifies the maximum length of the console session. For more // information, see Creating a URL that Enables Federated Users to Access the - // Amazon Web Services Management Console - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. DurationSeconds *int32 @@ -187,8 +164,7 @@ type AssumeRoleWithSAMLInput struct { // access resources in the account that owns the role. You cannot use session // policies to grant more permissions than those allowed by the identity-based // policy of the role that is being assumed. For more information, see Session - // Policies - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. The plaintext that you use for both inline and managed // session policies can't exceed 2,048 characters. The JSON policy characters can // be any ASCII character from the space character to the end of the valid @@ -206,9 +182,8 @@ type AssumeRoleWithSAMLInput struct { // the role. This parameter is optional. You can provide up to 10 managed policy // ARNs. However, the plaintext that you use for both inline and managed session // policies can't exceed 2,048 characters. For more information about ARNs, see - // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces - // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) in - // the Amazon Web Services General Reference. An Amazon Web Services conversion + // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. An Amazon Web Services conversion // compresses the passed inline session policy, managed policy ARNs, and session // tags into a packed binary format that has a separate limit. Your request can // fail for this limit even if your plaintext meets the other requirements. The @@ -220,8 +195,7 @@ type AssumeRoleWithSAMLInput struct { // Services API calls to access resources in the account that owns the role. You // cannot use session policies to grant more permissions than those allowed by the // identity-based policy of the role that is being assumed. For more information, - // see Session Policies - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. 
PolicyArns []types.PolicyDescriptorType @@ -251,19 +225,12 @@ type AssumeRoleWithSAMLOutput struct { Issuer *string // A hash value based on the concatenation of the following: - // - // * The Issuer response - // value. - // - // * The Amazon Web Services account ID. - // - // * The friendly name (the last - // part of the ARN) of the SAML provider in IAM. - // - // The combination of NameQualifier - // and Subject can be used to uniquely identify a federated user. The following - // pseudocode shows how the hash value is calculated: BASE64 ( SHA1 ( - // "https://example.com/saml" + "123456789012" + "/MySAMLIdP" ) ) + // - The Issuer response value. + // - The Amazon Web Services account ID. + // - The friendly name (the last part of the ARN) of the SAML provider in IAM. + // The combination of NameQualifier and Subject can be used to uniquely identify a + // user. The following pseudocode shows how the hash value is calculated: BASE64 ( + // SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" ) ) NameQualifier *string // A percentage value that indicates the packed size of the session policies and @@ -272,20 +239,18 @@ type AssumeRoleWithSAMLOutput struct { // allowed space. PackedPolicySize *int32 - // The value in the SourceIdentity attribute in the SAML assertion. You can require - // users to set a source identity value when they assume a role. You do this by - // using the sts:SourceIdentity condition key in a role trust policy. That way, - // actions that are taken with the role are associated with that user. After the - // source identity is set, the value cannot be changed. It is present in the + // The value in the SourceIdentity attribute in the SAML assertion. You can + // require users to set a source identity value when they assume a role. You do + // this by using the sts:SourceIdentity condition key in a role trust policy. That + // way, actions that are taken with the role are associated with that user. After + // the source identity is set, the value cannot be changed. It is present in the // request for all actions that are taken by the role and persists across chained - // role - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) + // role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) // sessions. You can configure your SAML identity provider to use an attribute // associated with your users, like user name or email, as the source identity when - // calling AssumeRoleWithSAML. You do this by adding an attribute to the SAML + // calling AssumeRoleWithSAML . You do this by adding an attribute to the SAML // assertion. For more information about using source identity, see Monitor and - // control actions taken with assumed roles - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) // in the IAM User Guide. The regex used to validate this parameter is a string of // characters consisting of upper- and lower-case alphanumeric characters with no // spaces. You can also include underscores or any of the following characters: @@ -297,10 +262,10 @@ type AssumeRoleWithSAMLOutput struct { // The format of the name ID, as defined by the Format attribute in the NameID // element of the SAML assertion. Typical examples of the format are transient or - // persistent. 
If the format includes the prefix - // urn:oasis:names:tc:SAML:2.0:nameid-format, that prefix is removed. For example, - // urn:oasis:names:tc:SAML:2.0:nameid-format:transient is returned as transient. If - // the format includes any other prefix, the format is returned with no + // persistent . If the format includes the prefix + // urn:oasis:names:tc:SAML:2.0:nameid-format , that prefix is removed. For example, + // urn:oasis:names:tc:SAML:2.0:nameid-format:transient is returned as transient . + // If the format includes any other prefix, the format is returned with no // modifications. SubjectType *string @@ -355,6 +320,9 @@ func (c *Client) addOperationAssumeRoleWithSAMLMiddlewares(stack *middleware.Sta if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRoleWithSAML(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go index e2ff4ac6..f00f3076 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go @@ -14,54 +14,44 @@ import ( // authenticated in a mobile or web application with a web identity provider. // Example providers include the OAuth 2.0 providers Login with Amazon and // Facebook, or any OpenID Connect-compatible identity provider such as Google or -// Amazon Cognito federated identities -// (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html). -// For mobile applications, we recommend that you use Amazon Cognito. You can use -// Amazon Cognito with the Amazon Web Services SDK for iOS Developer Guide -// (http://aws.amazon.com/sdkforios/) and the Amazon Web Services SDK for Android -// Developer Guide (http://aws.amazon.com/sdkforandroid/) to uniquely identify a -// user. You can also supply the user with a consistent identity throughout the -// lifetime of an application. To learn more about Amazon Cognito, see Amazon -// Cognito Overview -// (https://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840) -// in Amazon Web Services SDK for Android Developer Guide and Amazon Cognito -// Overview -// (https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) -// in the Amazon Web Services SDK for iOS Developer Guide. Calling -// AssumeRoleWithWebIdentity does not require the use of Amazon Web Services -// security credentials. Therefore, you can distribute an application (for example, -// on mobile devices) that requests temporary security credentials without -// including long-term Amazon Web Services credentials in the application. You also -// don't need to deploy server-based proxy services that use long-term Amazon Web -// Services credentials. Instead, the identity of the caller is validated by using -// a token from the web identity provider. 
For a comparison of -// AssumeRoleWithWebIdentity with the other API operations that produce temporary -// credentials, see Requesting Temporary Security Credentials -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// Amazon Cognito federated identities (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html) +// . For mobile applications, we recommend that you use Amazon Cognito. You can use +// Amazon Cognito with the Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) +// and the Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/) +// to uniquely identify a user. You can also supply the user with a consistent +// identity throughout the lifetime of an application. To learn more about Amazon +// Cognito, see Amazon Cognito identity pools (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html) +// in Amazon Cognito Developer Guide. Calling AssumeRoleWithWebIdentity does not +// require the use of Amazon Web Services security credentials. Therefore, you can +// distribute an application (for example, on mobile devices) that requests +// temporary security credentials without including long-term Amazon Web Services +// credentials in the application. You also don't need to deploy server-based proxy +// services that use long-term Amazon Web Services credentials. Instead, the +// identity of the caller is validated by using a token from the web identity +// provider. For a comparison of AssumeRoleWithWebIdentity with the other API +// operations that produce temporary credentials, see Requesting Temporary +// Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. The temporary security credentials returned by this API // consist of an access key ID, a secret access key, and a security token. // Applications can use these temporary security credentials to sign calls to // Amazon Web Services service API operations. Session Duration By default, the -// temporary security credentials created by AssumeRoleWithWebIdentity last for one -// hour. However, you can use the optional DurationSeconds parameter to specify the -// duration of your session. You can provide a value from 900 seconds (15 minutes) -// up to the maximum session duration setting for the role. This setting can have a -// value from 1 hour to 12 hours. To learn how to view the maximum value for your -// role, see View the Maximum Session Duration Setting for a Role -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// temporary security credentials created by AssumeRoleWithWebIdentity last for +// one hour. However, you can use the optional DurationSeconds parameter to +// specify the duration of your session. You can provide a value from 900 seconds +// (15 minutes) up to the maximum session duration setting for the role. This +// setting can have a value from 1 hour to 12 hours. 
To learn how to view the +// maximum value for your role, see View the Maximum Session Duration Setting for +// a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) // in the IAM User Guide. The maximum session duration limit applies when you use // the AssumeRole* API operations or the assume-role* CLI commands. However the // limit does not apply when you use those operations to create a console URL. For -// more information, see Using IAM Roles -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) in the IAM -// User Guide. Permissions The temporary security credentials created by +// more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. Permissions The temporary security credentials created by // AssumeRoleWithWebIdentity can be used to make API calls to any Amazon Web // Services service with the following exception: you cannot call the STS // GetFederationToken or GetSessionToken API operations. (Optional) You can pass -// inline or managed session policies -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an // inline session policy. You can also specify up to 10 managed policy Amazon // Resource Names (ARNs) to use as managed session policies. The plaintext that you @@ -72,17 +62,14 @@ import ( // credentials in subsequent Amazon Web Services API calls to access resources in // the account that owns the role. You cannot use session policies to grant more // permissions than those allowed by the identity-based policy of the role that is -// being assumed. For more information, see Session Policies -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. Tags (Optional) You can configure your IdP to pass // attributes into your web identity token as session tags. Each session tag // consists of a key name and an associated value. For more information about -// session tags, see Passing Session Tags in STS -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the -// IAM User Guide. You can pass up to 50 session tags. The plaintext session tag -// keys can’t exceed 128 characters and the values can’t exceed 256 characters. For -// these and additional limits, see IAM and STS Character Limits -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) +// session tags, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. You can pass up to 50 session tags. The plaintext session +// tag keys can’t exceed 128 characters and the values can’t exceed 256 characters. +// For these and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. 
An Amazon Web Services conversion compresses the passed // inline session policy, managed policy ARNs, and session tags into a packed // binary format that has a separate limit. Your request can fail for this limit @@ -93,52 +80,38 @@ import ( // overrides the role tag with the same key. An administrator must grant you the // permissions necessary to pass session tags. The administrator can also create // granular permissions to allow you to pass only specific session tags. For more -// information, see Tutorial: Using Tags for Attribute-Based Access Control -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// information, see Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) // in the IAM User Guide. You can set the session tags as transitive. Transitive -// tags persist during role chaining. For more information, see Chaining Roles with -// Session Tags -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// tags persist during role chaining. For more information, see Chaining Roles +// with Session Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) // in the IAM User Guide. Identities Before your application can call -// AssumeRoleWithWebIdentity, you must have an identity token from a supported +// AssumeRoleWithWebIdentity , you must have an identity token from a supported // identity provider and create a role that the application can assume. The role // that your application assumes must trust the identity provider that is // associated with the identity token. In other words, the identity provider must // be specified in the role's trust policy. Calling AssumeRoleWithWebIdentity can -// result in an entry in your CloudTrail logs. The entry includes the Subject -// (http://openid.net/specs/openid-connect-core-1_0.html#Claims) of the provided -// web identity token. We recommend that you avoid using any personally -// identifiable information (PII) in this field. For example, you could instead use -// a GUID or a pairwise identifier, as suggested in the OIDC specification -// (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes). For more -// information about how to use web identity federation and the +// result in an entry in your CloudTrail logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims) +// of the provided web identity token. We recommend that you avoid using any +// personally identifiable information (PII) in this field. For example, you could +// instead use a GUID or a pairwise identifier, as suggested in the OIDC +// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes) +// . For more information about how to use web identity federation and the // AssumeRoleWithWebIdentity API, see the following resources: -// -// * Using Web -// Identity Federation API Operations for Mobile Apps -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) -// and Federation Through a Web-based Identity Provider -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). -// -// * -// Web Identity Federation Playground -// (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/). 
-// Walk through the process of authenticating through Login with Amazon, Facebook, -// or Google, getting temporary security credentials, and then using those -// credentials to make a request to Amazon Web Services. -// -// * Amazon Web Services SDK -// for iOS Developer Guide (http://aws.amazon.com/sdkforios/) and Amazon Web -// Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/). -// These toolkits contain sample apps that show how to invoke the identity -// providers. The toolkits then show how to use the information from these -// providers to get and use temporary security credentials. -// -// * Web Identity -// Federation with Mobile Applications -// (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). -// This article discusses web identity federation and shows an example of how to -// use web identity federation to get access to content in Amazon S3. +// - Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) +// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) +// . +// - Web Identity Federation Playground (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/) +// . Walk through the process of authenticating through Login with Amazon, +// Facebook, or Google, getting temporary security credentials, and then using +// those credentials to make a request to Amazon Web Services. +// - Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) +// and Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/) +// . These toolkits contain sample apps that show how to invoke the identity +// providers. The toolkits then show how to use the information from these +// providers to get and use temporary security credentials. +// - Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications) +// . This article discusses web identity federation and shows an example of how to +// use web identity federation to get access to content in Amazon S3. func (c *Client) AssumeRoleWithWebIdentity(ctx context.Context, params *AssumeRoleWithWebIdentityInput, optFns ...func(*Options)) (*AssumeRoleWithWebIdentityOutput, error) { if params == nil { params = &AssumeRoleWithWebIdentityInput{} @@ -187,16 +160,14 @@ type AssumeRoleWithWebIdentityInput struct { // higher than this setting, the operation fails. For example, if you specify a // session duration of 12 hours, but your administrator set the maximum session // duration to 6 hours, your operation fails. To learn how to view the maximum - // value for your role, see View the Maximum Session Duration Setting for a Role - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // value for your role, see View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) // in the IAM User Guide. By default, the value is set to 3600 seconds. The // DurationSeconds parameter is separate from the duration of a console session // that you might request using the returned credentials. 
The request to the // federation endpoint for a console sign-in token takes a SessionDuration // parameter that specifies the maximum length of the console session. For more // information, see Creating a URL that Enables Federated Users to Access the - // Amazon Web Services Management Console - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. DurationSeconds *int32 @@ -208,8 +179,7 @@ type AssumeRoleWithWebIdentityInput struct { // access resources in the account that owns the role. You cannot use session // policies to grant more permissions than those allowed by the identity-based // policy of the role that is being assumed. For more information, see Session - // Policies - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. The plaintext that you use for both inline and managed // session policies can't exceed 2,048 characters. The JSON policy characters can // be any ASCII character from the space character to the end of the valid @@ -227,9 +197,8 @@ type AssumeRoleWithWebIdentityInput struct { // the role. This parameter is optional. You can provide up to 10 managed policy // ARNs. However, the plaintext that you use for both inline and managed session // policies can't exceed 2,048 characters. For more information about ARNs, see - // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces - // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) in - // the Amazon Web Services General Reference. An Amazon Web Services conversion + // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. An Amazon Web Services conversion // compresses the passed inline session policy, managed policy ARNs, and session // tags into a packed binary format that has a separate limit. Your request can // fail for this limit even if your plaintext meets the other requirements. The @@ -241,8 +210,7 @@ type AssumeRoleWithWebIdentityInput struct { // Services API calls to access resources in the account that owns the role. You // cannot use session policies to grant more permissions than those allowed by the // identity-based policy of the role that is being assumed. For more information, - // see Session Policies - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. PolicyArns []types.PolicyDescriptorType @@ -265,7 +233,7 @@ type AssumeRoleWithWebIdentityOutput struct { // that you can use to refer to the resulting temporary security credentials. For // example, you can reference these credentials as a principal in a resource-based // policy by using the ARN or assumed role ID. The ARN and ID include the - // RoleSessionName that you specified when you called AssumeRole. + // RoleSessionName that you specified when you called AssumeRole . AssumedRoleUser *types.AssumedRoleUser // The intended audience (also known as client ID) of the web identity token. 
This @@ -285,10 +253,10 @@ type AssumeRoleWithWebIdentityOutput struct { // allowed space. PackedPolicySize *int32 - // The issuing authority of the web identity token presented. For OpenID Connect ID - // tokens, this contains the value of the iss field. For OAuth 2.0 access tokens, - // this contains the value of the ProviderId parameter that was passed in the - // AssumeRoleWithWebIdentity request. + // The issuing authority of the web identity token presented. For OpenID Connect + // ID tokens, this contains the value of the iss field. For OAuth 2.0 access + // tokens, this contains the value of the ProviderId parameter that was passed in + // the AssumeRoleWithWebIdentity request. Provider *string // The value of the source identity that is returned in the JSON web token (JWT) @@ -297,17 +265,14 @@ type AssumeRoleWithWebIdentityOutput struct { // key in a role trust policy. That way, actions that are taken with the role are // associated with that user. After the source identity is set, the value cannot be // changed. It is present in the request for all actions that are taken by the role - // and persists across chained role - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) + // and persists across chained role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) // sessions. You can configure your identity provider to use an attribute // associated with your users, like user name or email, as the source identity when - // calling AssumeRoleWithWebIdentity. You do this by adding a claim to the JSON web - // token. To learn more about OIDC tokens and claims, see Using Tokens with User - // Pools - // (https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html) + // calling AssumeRoleWithWebIdentity . You do this by adding a claim to the JSON + // web token. To learn more about OIDC tokens and claims, see Using Tokens with + // User Pools (https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html) // in the Amazon Cognito Developer Guide. For more information about using source - // identity, see Monitor and control actions taken with assumed roles - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // identity, see Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) // in the IAM User Guide. The regex used to validate this parameter is a string of // characters consisting of upper- and lower-case alphanumeric characters with no // spaces. 
You can also include underscores or any of the following characters: @@ -373,6 +338,9 @@ func (c *Client) addOperationAssumeRoleWithWebIdentityMiddlewares(stack *middlew if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRoleWithWebIdentity(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go index b7a637d4..587d1d3c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go @@ -22,27 +22,17 @@ import ( // encoded because the details of the authorization status can contain privileged // information that the user who requested the operation should not see. To decode // an authorization status message, a user must be granted permissions through an -// IAM policy -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) to -// request the DecodeAuthorizationMessage (sts:DecodeAuthorizationMessage) action. -// The decoded message includes the following type of information: -// -// * Whether the -// request was denied due to an explicit deny or due to the absence of an explicit -// allow. For more information, see Determining Whether a Request is Allowed or -// Denied -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) -// in the IAM User Guide. -// -// * The principal who made the request. -// -// * The requested -// action. -// -// * The requested resource. -// -// * The values of condition keys in the -// context of the user's request. +// IAM policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) +// to request the DecodeAuthorizationMessage ( sts:DecodeAuthorizationMessage ) +// action. The decoded message includes the following type of information: +// - Whether the request was denied due to an explicit deny or due to the +// absence of an explicit allow. For more information, see Determining Whether a +// Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) +// in the IAM User Guide. +// - The principal who made the request. +// - The requested action. +// - The requested resource. +// - The values of condition keys in the context of the user's request. 
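A hedged sketch of the DecodeAuthorizationMessage usage documented above; the encoded message would come from an access-denied response to some other request, the caller needs sts:DecodeAuthorizationMessage, and the helper name is invented for illustration.

package stsexamples // hypothetical package for these sketches

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

// decodeDeniedMessage is a hypothetical helper: encoded is the opaque message
// returned alongside an authorization failure from some other Amazon Web
// Services request.
func decodeDeniedMessage(ctx context.Context, client *sts.Client, encoded string) (string, error) {
	out, err := client.DecodeAuthorizationMessage(ctx, &sts.DecodeAuthorizationMessageInput{
		EncodedMessage: aws.String(encoded),
	})
	if err != nil {
		return "", err
	}
	// The decoded message is a JSON document describing why the request was denied.
	return aws.ToString(out.DecodedMessage), nil
}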
func (c *Client) DecodeAuthorizationMessage(ctx context.Context, params *DecodeAuthorizationMessageInput, optFns ...func(*Options)) (*DecodeAuthorizationMessageOutput, error) { if params == nil { params = &DecodeAuthorizationMessageInput{} @@ -133,6 +123,9 @@ func (c *Client) addOperationDecodeAuthorizationMessageMiddlewares(stack *middle if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDecodeAuthorizationMessage(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go index b86a425d..f090ecf4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go @@ -11,21 +11,18 @@ import ( ) // Returns the account identifier for the specified access key ID. Access keys -// consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE) and a -// secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY). For -// more information about access keys, see Managing Access Keys for IAM Users -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) +// consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE ) and +// a secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY ). +// For more information about access keys, see Managing Access Keys for IAM Users (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) // in the IAM User Guide. When you pass an access key ID to this operation, it // returns the ID of the Amazon Web Services account to which the keys belong. // Access key IDs beginning with AKIA are long-term credentials for an IAM user or // the Amazon Web Services account root user. Access key IDs beginning with ASIA // are temporary credentials that are created using STS operations. If the account // in the response belongs to you, you can sign in as the root user and review your -// root user access keys. Then, you can pull a credentials report -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html) +// root user access keys. Then, you can pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html) // to learn which IAM user owns the keys. To learn who requested the temporary -// credentials for an ASIA access key, view the STS events in your CloudTrail logs -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html) +// credentials for an ASIA access key, view the STS events in your CloudTrail logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html) // in the IAM User Guide. This operation does not indicate the state of the access // key. The key might be active, inactive, or deleted. Active keys might not have // permissions to perform an operation. 
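A small illustrative helper (assumed names, not part of this patch) for the GetAccessKeyInfo behaviour described above: pass an access key ID and read back the owning account.

package stsexamples // hypothetical package for these sketches

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

// accountForAccessKey is a hypothetical helper that returns the account ID that
// owns the given access key ID (for example "AKIAIOSFODNN7EXAMPLE").
func accountForAccessKey(ctx context.Context, client *sts.Client, accessKeyID string) (string, error) {
	out, err := client.GetAccessKeyInfo(ctx, &sts.GetAccessKeyInfoInput{
		AccessKeyId: aws.String(accessKeyID),
	})
	if err != nil {
		return "", err
	}
	return aws.ToString(out.Account), nil
}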
Providing a deleted access key might return @@ -119,6 +116,9 @@ func (c *Client) addOperationGetAccessKeyInfoMiddlewares(stack *middleware.Stack if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetAccessKeyInfo(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go index a7f96c22..93823927 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go @@ -12,12 +12,11 @@ import ( // Returns details about the IAM user or role whose credentials are used to call // the operation. No permissions are required to perform this operation. If an -// administrator adds a policy to your IAM user or role that explicitly denies -// access to the sts:GetCallerIdentity action, you can still perform this -// operation. Permissions are not required because the same information is returned -// when an IAM user or role is denied access. To view an example response, see I Am -// Not Authorized to Perform: iam:DeleteVirtualMFADevice -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa) +// administrator attaches a policy to your identity that explicitly denies access +// to the sts:GetCallerIdentity action, you can still perform this operation. +// Permissions are not required because the same information is returned when +// access is denied. To view an example response, see I Am Not Authorized to +// Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa) // in the IAM User Guide. func (c *Client) GetCallerIdentity(ctx context.Context, params *GetCallerIdentityInput, optFns ...func(*Options)) (*GetCallerIdentityOutput, error) { if params == nil { @@ -49,10 +48,9 @@ type GetCallerIdentityOutput struct { // The Amazon Web Services ARN associated with the calling entity. Arn *string - // The unique identifier of the calling entity. The exact value depends on the type - // of entity that is making the call. The values returned are those listed in the - // aws:userid column in the Principal table - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) + // The unique identifier of the calling entity. The exact value depends on the + // type of entity that is making the call. The values returned are those listed in + // the aws:userid column in the Principal table (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) // found on the Policy Variables reference page in the IAM User Guide. 
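For context, a minimal sketch of the GetCallerIdentity call documented above; the helper name is hypothetical and the client is assumed to have been built with sts.NewFromConfig.

package stsexamples // hypothetical package for these sketches

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

// printCallerIdentity is a hypothetical helper that prints the account, ARN, and
// unique user ID of whichever credentials the client was configured with.
func printCallerIdentity(ctx context.Context, client *sts.Client) error {
	out, err := client.GetCallerIdentity(ctx, &sts.GetCallerIdentityInput{})
	if err != nil {
		return err
	}
	fmt.Println("Account:", aws.ToString(out.Account))
	fmt.Println("Arn:", aws.ToString(out.Arn))
	fmt.Println("UserId:", aws.ToString(out.UserId))
	return nil
}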
UserId *string @@ -110,6 +108,9 @@ func (c *Client) addOperationGetCallerIdentityMiddlewares(stack *middleware.Stac if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetCallerIdentity(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go index 60026a13..ccb7366e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go @@ -11,49 +11,40 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns a set of temporary security credentials (consisting of an access key ID, -// a secret access key, and a security token) for a federated user. A typical use -// is in a proxy application that gets temporary security credentials on behalf of +// Returns a set of temporary security credentials (consisting of an access key +// ID, a secret access key, and a security token) for a user. A typical use is in a +// proxy application that gets temporary security credentials on behalf of // distributed applications inside a corporate network. You must call the // GetFederationToken operation using the long-term security credentials of an IAM // user. As a result, this call is appropriate in contexts where those credentials -// can be safely stored, usually in a server-based application. For a comparison of +// can be safeguarded, usually in a server-based application. For a comparison of // GetFederationToken with the other API operations that produce temporary -// credentials, see Requesting Temporary Security Credentials -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. Although it is possible to call GetFederationToken using +// the security credentials of an Amazon Web Services account root user rather than +// an IAM user that you create for the purpose of a proxy application, we do not +// recommend it. For more information, see Safeguard your root user credentials +// and don't use them for everyday tasks (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials) // in the IAM User Guide. You can create a mobile-based or browser-based app that // can authenticate users using a web identity provider like Login with Amazon, // Facebook, Google, or an OpenID Connect-compatible identity provider. In this // case, we recommend that you use Amazon Cognito (http://aws.amazon.com/cognito/) -// or AssumeRoleWithWebIdentity. For more information, see Federation Through a -// Web-based Identity Provider -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) -// in the IAM User Guide. 
You can also call GetFederationToken using the security -// credentials of an Amazon Web Services account root user, but we do not recommend -// it. Instead, we recommend that you create an IAM user for the purpose of the -// proxy application. Then attach a policy to the IAM user that limits federated -// users to only the actions and resources that they need to access. For more -// information, see IAM Best Practices -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) in the -// IAM User Guide. Session duration The temporary credentials are valid for the -// specified duration, from 900 seconds (15 minutes) up to a maximum of 129,600 +// or AssumeRoleWithWebIdentity . For more information, see Federation Through a +// Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) +// in the IAM User Guide. Session duration The temporary credentials are valid for +// the specified duration, from 900 seconds (15 minutes) up to a maximum of 129,600 // seconds (36 hours). The default session duration is 43,200 seconds (12 hours). -// Temporary credentials obtained by using the Amazon Web Services account root -// user credentials have a maximum duration of 3,600 seconds (1 hour). Permissions -// You can use the temporary credentials created by GetFederationToken in any -// Amazon Web Services service except the following: +// Temporary credentials obtained by using the root user credentials have a maximum +// duration of 3,600 seconds (1 hour). Permissions You can use the temporary +// credentials created by GetFederationToken in any Amazon Web Services service +// with the following exceptions: +// - You cannot call any IAM operations using the CLI or the Amazon Web Services +// API. This limitation does not apply to console sessions. +// - You cannot call any STS operations except GetCallerIdentity . // -// * You cannot call any IAM -// operations using the CLI or the Amazon Web Services API. -// -// * You cannot call any -// STS operations except GetCallerIdentity. -// -// You must pass an inline or managed -// session policy -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// You can use temporary credentials for single sign-on (SSO) to the console. You +// must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an // inline session policy. You can also specify up to 10 managed policy Amazon // Resource Names (ARNs) to use as managed session policies. The plaintext that you @@ -64,38 +55,33 @@ import ( // policies and the session policies that you pass. This gives you a way to further // restrict the permissions for a federated user. You cannot use session policies // to grant more permissions than those that are defined in the permissions policy -// of the IAM user. For more information, see Session Policies -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// of the IAM user. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. 
For information about using GetFederationToken to create // temporary security credentials, see GetFederationToken—Federation Through a -// Custom Identity Broker -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). -// You can use the credentials to access a resource that has a resource-based +// Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken) +// . You can use the credentials to access a resource that has a resource-based // policy. If that policy specifically references the federated user session in the // Principal element of the policy, the session has the permissions allowed by the // policy. These permissions are granted in addition to the permissions granted by // the session policies. Tags (Optional) You can pass tag key-value pairs to your // session. These are called session tags. For more information about session tags, -// see Passing Session Tags in STS -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the -// IAM User Guide. You can create a mobile-based or browser-based app that can -// authenticate users using a web identity provider like Login with Amazon, +// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. You can create a mobile-based or browser-based app that +// can authenticate users using a web identity provider like Login with Amazon, // Facebook, Google, or an OpenID Connect-compatible identity provider. In this // case, we recommend that you use Amazon Cognito (http://aws.amazon.com/cognito/) -// or AssumeRoleWithWebIdentity. For more information, see Federation Through a -// Web-based Identity Provider -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) +// or AssumeRoleWithWebIdentity . For more information, see Federation Through a +// Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) // in the IAM User Guide. An administrator must grant you the permissions necessary // to pass session tags. The administrator can also create granular permissions to // allow you to pass only specific session tags. For more information, see -// Tutorial: Using Tags for Attribute-Based Access Control -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) // in the IAM User Guide. Tag key–value pairs are not case sensitive, but case is // preserved. This means that you cannot have separate Department and department -// tag keys. Assume that the user that you are federating has the -// Department=Marketing tag and you pass the department=engineering session tag. -// Department and department are not saved as separate tags, and the session tag -// passed in the request takes precedence over the user tag. +// tag keys. Assume that the user that you are federating has the Department = +// Marketing tag and you pass the department = engineering session tag. Department +// and department are not saved as separate tags, and the session tag passed in +// the request takes precedence over the user tag. 
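A sketch, under the assumptions noted in the comments, of a GetFederationToken call that passes a name, an inline session policy, and a session tag as described above; the policy JSON, tag values, and helper name are placeholders.

package stsexamples // hypothetical package for these sketches

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
	"github.com/aws/aws-sdk-go-v2/service/sts/types"
)

// federatedCredentials is a hypothetical helper. The client must be configured
// with an IAM user's long-term credentials, per the documentation above.
func federatedCredentials(ctx context.Context, client *sts.Client) (*types.Credentials, error) {
	// Placeholder session policy; effective permissions are the intersection of
	// this policy and the calling IAM user's permissions policies.
	policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:GetObject","Resource":"*"}]}`

	out, err := client.GetFederationToken(ctx, &sts.GetFederationTokenInput{
		Name:            aws.String("Bob"),  // federated user name, e.g. referenced in a bucket policy
		DurationSeconds: aws.Int32(43200),   // 12 hours, the documented default
		Policy:          aws.String(policy),
		Tags: []types.Tag{
			{Key: aws.String("Department"), Value: aws.String("Engineering")}, // placeholder session tag
		},
	})
	if err != nil {
		return nil, err
	}
	return out.Credentials, nil
}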
func (c *Client) GetFederationToken(ctx context.Context, params *GetFederationTokenInput, optFns ...func(*Options)) (*GetFederationTokenOutput, error) { if params == nil { params = &GetFederationTokenInput{} @@ -114,26 +100,26 @@ func (c *Client) GetFederationToken(ctx context.Context, params *GetFederationTo type GetFederationTokenInput struct { // The name of the federated user. The name is used as an identifier for the - // temporary security credentials (such as Bob). For example, you can reference the - // federated user name in a resource-based policy, such as in an Amazon S3 bucket - // policy. The regex used to validate this parameter is a string of characters - // consisting of upper- and lower-case alphanumeric characters with no spaces. You - // can also include underscores or any of the following characters: =,.@- + // temporary security credentials (such as Bob ). For example, you can reference + // the federated user name in a resource-based policy, such as in an Amazon S3 + // bucket policy. The regex used to validate this parameter is a string of + // characters consisting of upper- and lower-case alphanumeric characters with no + // spaces. You can also include underscores or any of the following characters: + // =,.@- // // This member is required. Name *string - // The duration, in seconds, that the session should last. Acceptable durations for - // federation sessions range from 900 seconds (15 minutes) to 129,600 seconds (36 - // hours), with 43,200 seconds (12 hours) as the default. Sessions obtained using - // Amazon Web Services account root user credentials are restricted to a maximum of - // 3,600 seconds (one hour). If the specified duration is longer than one hour, the - // session obtained by using root user credentials defaults to one hour. + // The duration, in seconds, that the session should last. Acceptable durations + // for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds + // (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained + // using root user credentials are restricted to a maximum of 3,600 seconds (one + // hour). If the specified duration is longer than one hour, the session obtained + // by using root user credentials defaults to one hour. DurationSeconds *int32 // An IAM policy in JSON format that you want to use as an inline session policy. - // You must pass an inline or managed session policy - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an // inline session policy. You can also specify up to 10 managed policy Amazon // Resource Names (ARNs) to use as managed session policies. This parameter is @@ -143,8 +129,7 @@ type GetFederationTokenInput struct { // session policies that you pass. This gives you a way to further restrict the // permissions for a federated user. You cannot use session policies to grant more // permissions than those that are defined in the permissions policy of the IAM - // user. For more information, see Session Policies - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // user. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. 
The resulting credentials can be used to access a // resource that has a resource-based policy. If that policy specifically // references the federated user session in the Principal element of the policy, @@ -165,24 +150,21 @@ type GetFederationTokenInput struct { // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to // use as a managed session policy. The policies must exist in the same account as // the IAM user that is requesting federated access. You must pass an inline or - // managed session policy - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an // inline session policy. You can also specify up to 10 managed policy Amazon // Resource Names (ARNs) to use as managed session policies. The plaintext that you // use for both inline and managed session policies can't exceed 2,048 characters. // You can provide up to 10 managed policy ARNs. For more information about ARNs, - // see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces - // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) in - // the Amazon Web Services General Reference. This parameter is optional. However, - // if you do not pass any session policies, then the resulting federated user - // session has no permissions. When you pass session policies, the session + // see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. This parameter is optional. + // However, if you do not pass any session policies, then the resulting federated + // user session has no permissions. When you pass session policies, the session // permissions are the intersection of the IAM user policies and the session // policies that you pass. This gives you a way to further restrict the permissions // for a federated user. You cannot use session policies to grant more permissions // than those that are defined in the permissions policy of the IAM user. For more - // information, see Session Policies - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. The resulting credentials can be used to access a // resource that has a resource-based policy. If that policy specifically // references the federated user session in the Principal element of the policy, @@ -191,20 +173,18 @@ type GetFederationTokenInput struct { // An Amazon Web Services conversion compresses the passed inline session policy, // managed policy ARNs, and session tags into a packed binary format that has a // separate limit. Your request can fail for this limit even if your plaintext - // meets the other requirements. The PackedPolicySize response element indicates by - // percentage how close the policies and tags for your request are to the upper + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper // size limit. PolicyArns []types.PolicyDescriptorType // A list of session tags. 
Each session tag consists of a key name and an // associated value. For more information about session tags, see Passing Session - // Tags in STS - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the - // IAM User Guide. This parameter is optional. You can pass up to 50 session tags. - // The plaintext session tag keys can’t exceed 128 characters and the values can’t - // exceed 256 characters. For these and additional limits, see IAM and STS - // Character Limits - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. This parameter is optional. You can pass up to 50 session + // tags. The plaintext session tag keys can’t exceed 128 characters and the values + // can’t exceed 256 characters. For these and additional limits, see IAM and STS + // Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. An Amazon Web Services conversion compresses the passed // inline session policy, managed policy ARNs, and session tags into a packed // binary format that has a separate limit. Your request can fail for this limit @@ -215,9 +195,9 @@ type GetFederationTokenInput struct { // you do, session tags override a user tag with the same key. Tag key–value pairs // are not case sensitive, but case is preserved. This means that you cannot have // separate Department and department tag keys. Assume that the role has the - // Department=Marketing tag and you pass the department=engineering session tag. - // Department and department are not saved as separate tags, and the session tag - // passed in the request takes precedence over the role tag. + // Department = Marketing tag and you pass the department = engineering session + // tag. Department and department are not saved as separate tags, and the session + // tag passed in the request takes precedence over the role tag. Tags []types.Tag noSmithyDocumentSerde @@ -235,7 +215,7 @@ type GetFederationTokenOutput struct { Credentials *types.Credentials // Identifiers for the federated user associated with the credentials (such as - // arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You can use + // arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob ). You can use // the federated user's ARN in your resource-based policies, such as an Amazon S3 // bucket policy. FederatedUser *types.FederatedUser @@ -303,6 +283,9 @@ func (c *Client) addOperationGetFederationTokenMiddlewares(stack *middleware.Sta if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetFederationToken(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go index bfde5168..4dfac4c9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go @@ -11,59 +11,46 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns a set of temporary credentials for an Amazon Web Services account or IAM -// user. 
The credentials consist of an access key ID, a secret access key, and a -// security token. Typically, you use GetSessionToken if you want to use MFA to +// Returns a set of temporary credentials for an Amazon Web Services account or +// IAM user. The credentials consist of an access key ID, a secret access key, and +// a security token. Typically, you use GetSessionToken if you want to use MFA to // protect programmatic calls to specific Amazon Web Services API operations like -// Amazon EC2 StopInstances. MFA-enabled IAM users would need to call -// GetSessionToken and submit an MFA code that is associated with their MFA device. -// Using the temporary security credentials that are returned from the call, IAM -// users can then make programmatic calls to API operations that require MFA -// authentication. If you do not supply a correct MFA code, then the API returns an -// access denied error. For a comparison of GetSessionToken with the other API -// operations that produce temporary credentials, see Requesting Temporary Security -// Credentials -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// Amazon EC2 StopInstances . MFA-enabled IAM users must call GetSessionToken and +// submit an MFA code that is associated with their MFA device. Using the temporary +// security credentials that the call returns, IAM users can then make programmatic +// calls to API operations that require MFA authentication. An incorrect MFA code +// causes the API to return an access denied error. For a comparison of +// GetSessionToken with the other API operations that produce temporary +// credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. No permissions are required for users to perform this // operation. The purpose of the sts:GetSessionToken operation is to authenticate // the user using MFA. You cannot use policies to control authentication -// operations. For more information, see Permissions for GetSessionToken -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html) +// operations. For more information, see Permissions for GetSessionToken (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html) // in the IAM User Guide. Session Duration The GetSessionToken operation must be -// called by using the long-term Amazon Web Services security credentials of the -// Amazon Web Services account root user or an IAM user. Credentials that are -// created by IAM users are valid for the duration that you specify. This duration -// can range from 900 seconds (15 minutes) up to a maximum of 129,600 seconds (36 -// hours), with a default of 43,200 seconds (12 hours). Credentials based on -// account credentials can range from 900 seconds (15 minutes) up to 3,600 seconds -// (1 hour), with a default of 1 hour. 
Permissions The temporary security -// credentials created by GetSessionToken can be used to make API calls to any -// Amazon Web Services service with the following exceptions: +// called by using the long-term Amazon Web Services security credentials of an IAM +// user. Credentials that are created by IAM users are valid for the duration that +// you specify. This duration can range from 900 seconds (15 minutes) up to a +// maximum of 129,600 seconds (36 hours), with a default of 43,200 seconds (12 +// hours). Credentials based on account credentials can range from 900 seconds (15 +// minutes) up to 3,600 seconds (1 hour), with a default of 1 hour. Permissions The +// temporary security credentials created by GetSessionToken can be used to make +// API calls to any Amazon Web Services service with the following exceptions: +// - You cannot call any IAM API operations unless MFA authentication +// information is included in the request. +// - You cannot call any STS API except AssumeRole or GetCallerIdentity . // -// * You cannot call -// any IAM API operations unless MFA authentication information is included in the -// request. -// -// * You cannot call any STS API except AssumeRole or -// GetCallerIdentity. -// -// We recommend that you do not call GetSessionToken with -// Amazon Web Services account root user credentials. Instead, follow our best -// practices -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) -// by creating one or more IAM users, giving them the necessary permissions, and -// using IAM users for everyday interaction with Amazon Web Services. The -// credentials that are returned by GetSessionToken are based on permissions -// associated with the user whose credentials were used to call the operation. If -// GetSessionToken is called using Amazon Web Services account root user -// credentials, the temporary credentials have root user permissions. Similarly, if -// GetSessionToken is called using the credentials of an IAM user, the temporary -// credentials have the same permissions as the IAM user. For more information -// about using GetSessionToken to create temporary credentials, go to Temporary -// Credentials for Users in Untrusted Environments -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) +// The credentials that GetSessionToken returns are based on permissions +// associated with the IAM user whose credentials were used to call the operation. +// The temporary credentials have the same permissions as the IAM user. Although it +// is possible to call GetSessionToken using the security credentials of an Amazon +// Web Services account root user rather than an IAM user, we do not recommend it. +// If GetSessionToken is called using root user credentials, the temporary +// credentials have root user permissions. For more information, see Safeguard +// your root user credentials and don't use them for everyday tasks (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials) +// in the IAM User Guide For more information about using GetSessionToken to +// create temporary credentials, see Temporary Credentials for Users in Untrusted +// Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) // in the IAM User Guide. 
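An illustrative sketch (assumed helper name and placeholder MFA values) of the MFA-protected GetSessionToken flow described above, including reuse of the returned temporary credentials through a static credentials provider.

package stsexamples // hypothetical package for these sketches

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/credentials"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

// mfaSessionConfig is a hypothetical helper: it calls GetSessionToken with the
// caller's MFA device serial number and current code, and returns an aws.Config
// that uses the resulting temporary credentials.
func mfaSessionConfig(ctx context.Context, base aws.Config, serial, code string) (aws.Config, error) {
	client := sts.NewFromConfig(base)

	out, err := client.GetSessionToken(ctx, &sts.GetSessionTokenInput{
		DurationSeconds: aws.Int32(3600),    // 15 minutes to 36 hours for IAM users
		SerialNumber:    aws.String(serial), // e.g. arn:aws:iam::123456789012:mfa/user (placeholder)
		TokenCode:       aws.String(code),   // the six-digit code from the MFA device
	})
	if err != nil {
		return aws.Config{}, err
	}

	cfg := base.Copy()
	cfg.Credentials = credentials.NewStaticCredentialsProvider(
		aws.ToString(out.Credentials.AccessKeyId),
		aws.ToString(out.Credentials.SecretAccessKey),
		aws.ToString(out.Credentials.SessionToken),
	)
	return cfg, nil
}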
func (c *Client) GetSessionToken(ctx context.Context, params *GetSessionTokenInput, optFns ...func(*Options)) (*GetSessionTokenOutput, error) { if params == nil { @@ -90,25 +77,25 @@ type GetSessionTokenInput struct { // Services account owners defaults to one hour. DurationSeconds *int32 - // The identification number of the MFA device that is associated with the IAM user - // who is making the GetSessionToken call. Specify this value if the IAM user has a - // policy that requires MFA authentication. The value is either the serial number - // for a hardware device (such as GAHT12345678) or an Amazon Resource Name (ARN) - // for a virtual device (such as arn:aws:iam::123456789012:mfa/user). You can find - // the device for an IAM user by going to the Amazon Web Services Management - // Console and viewing the user's security credentials. The regex used to validate - // this parameter is a string of characters consisting of upper- and lower-case - // alphanumeric characters with no spaces. You can also include underscores or any - // of the following characters: =,.@:/- + // The identification number of the MFA device that is associated with the IAM + // user who is making the GetSessionToken call. Specify this value if the IAM user + // has a policy that requires MFA authentication. The value is either the serial + // number for a hardware device (such as GAHT12345678 ) or an Amazon Resource Name + // (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user ). You + // can find the device for an IAM user by going to the Amazon Web Services + // Management Console and viewing the user's security credentials. The regex used + // to validate this parameter is a string of characters consisting of upper- and + // lower-case alphanumeric characters with no spaces. You can also include + // underscores or any of the following characters: =,.@:/- SerialNumber *string - // The value provided by the MFA device, if MFA is required. If any policy requires - // the IAM user to submit an MFA code, specify this value. If MFA authentication is - // required, the user must provide a code when requesting a set of temporary - // security credentials. A user who fails to provide the code receives an "access - // denied" response when requesting resources that require MFA authentication. The - // format for this parameter, as described by its regex pattern, is a sequence of - // six numeric digits. + // The value provided by the MFA device, if MFA is required. If any policy + // requires the IAM user to submit an MFA code, specify this value. If MFA + // authentication is required, the user must provide a code when requesting a set + // of temporary security credentials. A user who fails to provide the code receives + // an "access denied" response when requesting resources that require MFA + // authentication. The format for this parameter, as described by its regex + // pattern, is a sequence of six numeric digits. 
TokenCode *string noSmithyDocumentSerde @@ -179,6 +166,9 @@ func (c *Client) addOperationGetSessionTokenMiddlewares(stack *middleware.Stack, if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetSessionToken(options.Region), middleware.Before); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go index 7cabbb97..d963fd8d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go @@ -4,9 +4,8 @@ // Security Token Service. // // Security Token Service Security Token Service (STS) enables you to request -// temporary, limited-privilege credentials for Identity and Access Management -// (IAM) users or for users that you authenticate (federated users). This guide -// provides descriptions of the STS API. For more information about using this -// service, see Temporary Security Credentials -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). +// temporary, limited-privilege credentials for users. This guide provides +// descriptions of the STS API. For more information about using this service, see +// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html) +// . package sts diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go index 2755e647..f88dca99 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go @@ -3,4 +3,4 @@ package sts // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.18.0" +const goModuleVersion = "1.19.2" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go index ce9acedc..0413fd89 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go @@ -89,6 +89,8 @@ var partitionRegexp = struct { AwsCn *regexp.Regexp AwsIso *regexp.Regexp AwsIsoB *regexp.Regexp + AwsIsoE *regexp.Regexp + AwsIsoF *regexp.Regexp AwsUsGov *regexp.Regexp }{ @@ -96,6 +98,8 @@ var partitionRegexp = struct { AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), + AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), + AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"), AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), } @@ -165,6 +169,9 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "ap-southeast-3", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-4", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "aws-global", }: endpoints.Endpoint{ @@ -381,6 +388,48 @@ var defaultPartitions = endpoints.Partitions{ }: endpoints.Endpoint{}, }, }, + { + ID: "aws-iso-e", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + 
SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoE, + IsRegionalized: true, + }, + { + ID: "aws-iso-f", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoF, + IsRegionalized: true, + }, { ID: "aws-us-gov", Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go index 05531d36..eb60f61b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go @@ -523,9 +523,6 @@ func (m *awsAwsquery_serializeOpGetSessionToken) HandleSerialize(ctx context.Con return next.HandleSerialize(ctx, in) } func awsAwsquery_serializeDocumentPolicyDescriptorListType(v []types.PolicyDescriptorType, value query.Value) error { - if len(v) == 0 { - return nil - } array := value.Array("member") for i := range v { @@ -567,9 +564,6 @@ func awsAwsquery_serializeDocumentTag(v *types.Tag, value query.Value) error { } func awsAwsquery_serializeDocumentTagKeyListType(v []string, value query.Value) error { - if len(v) == 0 { - return nil - } array := value.Array("member") for i := range v { @@ -580,9 +574,6 @@ func awsAwsquery_serializeDocumentTagKeyListType(v []string, value query.Value) } func awsAwsquery_serializeDocumentTagListType(v []types.Tag, value query.Value) error { - if len(v) == 0 { - return nil - } array := value.Array("member") for i := range v { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go index 88d3e6c6..097875b2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go @@ -27,7 +27,7 @@ func (e *ExpiredTokenException) ErrorMessage() string { return *e.Message } func (e *ExpiredTokenException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "ExpiredTokenException" } return *e.ErrorCodeOverride @@ -57,7 +57,7 @@ func (e *IDPCommunicationErrorException) ErrorMessage() string { return *e.Message } func (e *IDPCommunicationErrorException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "IDPCommunicationError" } return *e.ErrorCodeOverride @@ -86,7 +86,7 @@ func (e *IDPRejectedClaimException) ErrorMessage() string { return *e.Message } func (e *IDPRejectedClaimException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "IDPRejectedClaim" } return *e.ErrorCodeOverride @@ -114,7 +114,7 @@ func (e *InvalidAuthorizationMessageException) ErrorMessage() string { return *e.Message } func (e *InvalidAuthorizationMessageException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "InvalidAuthorizationMessageException" } return *e.ErrorCodeOverride @@ -144,7 +144,7 @@ 
func (e *InvalidIdentityTokenException) ErrorMessage() string { return *e.Message } func (e *InvalidIdentityTokenException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "InvalidIdentityToken" } return *e.ErrorCodeOverride @@ -171,7 +171,7 @@ func (e *MalformedPolicyDocumentException) ErrorMessage() string { return *e.Message } func (e *MalformedPolicyDocumentException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "MalformedPolicyDocument" } return *e.ErrorCodeOverride @@ -183,12 +183,10 @@ func (e *MalformedPolicyDocumentException) ErrorFault() smithy.ErrorFault { retu // compresses the session policy document, session policy ARNs, and session tags // into a packed binary format that has a separate limit. The error message // indicates by percentage how close the policies and tags are to the upper size -// limit. For more information, see Passing Session Tags in STS -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the -// IAM User Guide. You could receive this error even though you meet other defined -// session policy and session tag limits. For more information, see IAM and STS -// Entity Character Limits -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) +// limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. You could receive this error even though you meet other +// defined session policy and session tag limits. For more information, see IAM +// and STS Entity Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) // in the IAM User Guide. type PackedPolicyTooLargeException struct { Message *string @@ -208,18 +206,17 @@ func (e *PackedPolicyTooLargeException) ErrorMessage() string { return *e.Message } func (e *PackedPolicyTooLargeException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "PackedPolicyTooLarge" } return *e.ErrorCodeOverride } func (e *PackedPolicyTooLargeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// STS is not activated in the requested region for the account that is being asked -// to generate credentials. The account administrator must use the IAM console to -// activate STS in that region. For more information, see Activating and -// Deactivating Amazon Web Services STS in an Amazon Web Services Region -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating Amazon Web Services STS in an Amazon Web Services Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. 
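As a hedged illustration of consuming the typed errors in this file, a sketch that matches PackedPolicyTooLargeException and RegionDisabledException with errors.As; the helper and its inputs are invented for the example.

package stsexamples // hypothetical package for these sketches

import (
	"context"
	"errors"
	"log"

	"github.com/aws/aws-sdk-go-v2/service/sts"
	"github.com/aws/aws-sdk-go-v2/service/sts/types"
)

// classifySTSError is a hypothetical helper showing how the typed errors above
// can be matched after a failed call.
func classifySTSError(ctx context.Context, client *sts.Client, in *sts.GetFederationTokenInput) {
	_, err := client.GetFederationToken(ctx, in)
	if err == nil {
		return
	}

	var tooLarge *types.PackedPolicyTooLargeException
	var disabled *types.RegionDisabledException
	switch {
	case errors.As(err, &tooLarge):
		log.Println("packed session policy/tags exceed the limit:", tooLarge.ErrorMessage())
	case errors.As(err, &disabled):
		log.Println("STS is not activated in this region:", disabled.ErrorMessage())
	default:
		log.Println("GetFederationToken failed:", err)
	}
}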
type RegionDisabledException struct { Message *string @@ -239,7 +236,7 @@ func (e *RegionDisabledException) ErrorMessage() string { return *e.Message } func (e *RegionDisabledException) ErrorCode() string { - if e.ErrorCodeOverride == nil { + if e == nil || e.ErrorCodeOverride == nil { return "RegionDisabledException" } return *e.ErrorCodeOverride diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go index 86e50990..90d4f62a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go @@ -13,9 +13,8 @@ type AssumedRoleUser struct { // The ARN of the temporary security credentials that are returned from the // AssumeRole action. For more information about ARNs and how to use them in - // policies, see IAM Identifiers - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) in - // the IAM User Guide. + // policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. // // This member is required. Arn *string @@ -62,9 +61,8 @@ type FederatedUser struct { // The ARN that specifies the federated user that is associated with the // credentials. For more information about ARNs and how to use them in policies, - // see IAM Identifiers - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) in - // the IAM User Guide. + // see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. // // This member is required. Arn *string @@ -84,26 +82,23 @@ type PolicyDescriptorType struct { // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session // policy for the role. For more information about ARNs, see Amazon Resource Names - // (ARNs) and Amazon Web Services Service Namespaces - // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) in - // the Amazon Web Services General Reference. + // (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. Arn *string noSmithyDocumentSerde } -// You can pass custom key-value pair attributes when you assume a role or federate -// a user. These are called session tags. You can then use the session tags to -// control access to resources. For more information, see Tagging Amazon Web -// Services STS Sessions -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the -// IAM User Guide. +// You can pass custom key-value pair attributes when you assume a role or +// federate a user. These are called session tags. You can then use the session +// tags to control access to resources. For more information, see Tagging Amazon +// Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. type Tag struct { // The key for a session tag. You can pass up to 50 session tags. The plain text // session tag keys can’t exceed 128 characters. 
For these and additional limits, - // see IAM and STS Character Limits - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. // // This member is required. @@ -111,8 +106,7 @@ type Tag struct { // The value for a session tag. You can pass up to 50 session tags. The plain text // session tag values can’t exceed 256 characters. For these and additional limits, - // see IAM and STS Character Limits - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. // // This member is required. diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go index 875b0681..9cacb32a 100644 --- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go +++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 -// protoc v3.12.3 +// protoc-gen-go v1.26.0 +// protoc v3.17.3 // source: opencensus/proto/agent/common/v1/common.proto // NOTE: This proto is experimental and is subject to change at this point. @@ -24,10 +24,9 @@ package v1 import ( - proto "github.com/golang/protobuf/proto" - timestamp "github.com/golang/protobuf/ptypes/timestamp" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" sync "sync" ) @@ -39,10 +38,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - type LibraryInfo_Language int32 const ( @@ -207,7 +202,7 @@ type ProcessIdentifier struct { // Process id. Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` // Start time of this ProcessIdentifier. Represented in epoch time. 
- StartTimestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"` + StartTimestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"` } func (x *ProcessIdentifier) Reset() { @@ -256,7 +251,7 @@ func (x *ProcessIdentifier) GetPid() uint32 { return 0 } -func (x *ProcessIdentifier) GetStartTimestamp() *timestamp.Timestamp { +func (x *ProcessIdentifier) GetStartTimestamp() *timestamppb.Timestamp { if x != nil { return x.StartTimestamp } @@ -447,7 +442,7 @@ var file_opencensus_proto_agent_common_v1_common_proto_rawDesc = []byte{ 0x10, 0x09, 0x12, 0x0a, 0x0a, 0x06, 0x57, 0x45, 0x42, 0x5f, 0x4a, 0x53, 0x10, 0x0a, 0x22, 0x21, 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x42, 0xa2, 0x01, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, + 0x65, 0x42, 0xa6, 0x01, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x49, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, @@ -455,9 +450,10 @@ var file_opencensus_proto_agent_common_v1_common_proto_rawDesc = []byte{ 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x2f, 0x76, 0x31, 0xea, 0x02, 0x20, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73, 0x75, 0x73, - 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x2f, 0x76, 0x31, 0xea, 0x02, 0x24, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73, 0x75, 0x73, + 0x3a, 0x3a, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x3a, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x3a, 0x3a, + 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -475,13 +471,13 @@ func file_opencensus_proto_agent_common_v1_common_proto_rawDescGZIP() []byte { var file_opencensus_proto_agent_common_v1_common_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_opencensus_proto_agent_common_v1_common_proto_msgTypes = make([]protoimpl.MessageInfo, 5) var file_opencensus_proto_agent_common_v1_common_proto_goTypes = []interface{}{ - (LibraryInfo_Language)(0), // 0: opencensus.proto.agent.common.v1.LibraryInfo.Language - (*Node)(nil), // 1: opencensus.proto.agent.common.v1.Node - (*ProcessIdentifier)(nil), // 2: opencensus.proto.agent.common.v1.ProcessIdentifier - (*LibraryInfo)(nil), // 3: opencensus.proto.agent.common.v1.LibraryInfo - (*ServiceInfo)(nil), // 4: opencensus.proto.agent.common.v1.ServiceInfo - nil, // 5: opencensus.proto.agent.common.v1.Node.AttributesEntry - (*timestamp.Timestamp)(nil), // 6: google.protobuf.Timestamp + (LibraryInfo_Language)(0), // 0: opencensus.proto.agent.common.v1.LibraryInfo.Language + (*Node)(nil), // 1: opencensus.proto.agent.common.v1.Node + (*ProcessIdentifier)(nil), // 2: 
opencensus.proto.agent.common.v1.ProcessIdentifier + (*LibraryInfo)(nil), // 3: opencensus.proto.agent.common.v1.LibraryInfo + (*ServiceInfo)(nil), // 4: opencensus.proto.agent.common.v1.ServiceInfo + nil, // 5: opencensus.proto.agent.common.v1.Node.AttributesEntry + (*timestamppb.Timestamp)(nil), // 6: google.protobuf.Timestamp } var file_opencensus_proto_agent_common_v1_common_proto_depIdxs = []int32{ 2, // 0: opencensus.proto.agent.common.v1.Node.identifier:type_name -> opencensus.proto.agent.common.v1.ProcessIdentifier diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.go index bfb43893..4f9faffd 100644 --- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.go +++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.go @@ -14,17 +14,20 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 -// protoc v3.12.3 +// protoc-gen-go v1.26.0 +// protoc v3.17.3 // source: opencensus/proto/agent/metrics/v1/metrics_service.proto package v1 import ( + context "context" v1 "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" v11 "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" v12 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" - proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -38,10 +41,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. 
-const _ = proto.ProtoPackageIsVersion4 - type ExportMetricsServiceRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -195,7 +194,7 @@ var file_opencensus_proto_agent_metrics_v1_metrics_service_proto_rawDesc = []byt 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0xad, 0x01, 0x0a, 0x24, 0x69, 0x6f, 0x2e, 0x6f, + 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0xb1, 0x01, 0x0a, 0x24, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x13, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, @@ -204,9 +203,10 @@ var file_opencensus_proto_agent_metrics_v1_metrics_service_proto_rawDesc = []byt 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, - 0x2f, 0x76, 0x31, 0xea, 0x02, 0x21, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73, 0x75, 0x73, - 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x73, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x2f, 0x76, 0x31, 0xea, 0x02, 0x25, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73, 0x75, 0x73, + 0x3a, 0x3a, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x3a, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x3a, 0x3a, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( @@ -292,3 +292,119 @@ func file_opencensus_proto_agent_metrics_v1_metrics_service_proto_init() { file_opencensus_proto_agent_metrics_v1_metrics_service_proto_goTypes = nil file_opencensus_proto_agent_metrics_v1_metrics_service_proto_depIdxs = nil } + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// MetricsServiceClient is the client API for MetricsService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MetricsServiceClient interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(ctx context.Context, opts ...grpc.CallOption) (MetricsService_ExportClient, error) +} + +type metricsServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewMetricsServiceClient(cc grpc.ClientConnInterface) MetricsServiceClient { + return &metricsServiceClient{cc} +} + +func (c *metricsServiceClient) Export(ctx context.Context, opts ...grpc.CallOption) (MetricsService_ExportClient, error) { + stream, err := c.cc.NewStream(ctx, &_MetricsService_serviceDesc.Streams[0], "/opencensus.proto.agent.metrics.v1.MetricsService/Export", opts...) 
+ if err != nil { + return nil, err + } + x := &metricsServiceExportClient{stream} + return x, nil +} + +type MetricsService_ExportClient interface { + Send(*ExportMetricsServiceRequest) error + Recv() (*ExportMetricsServiceResponse, error) + grpc.ClientStream +} + +type metricsServiceExportClient struct { + grpc.ClientStream +} + +func (x *metricsServiceExportClient) Send(m *ExportMetricsServiceRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *metricsServiceExportClient) Recv() (*ExportMetricsServiceResponse, error) { + m := new(ExportMetricsServiceResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// MetricsServiceServer is the server API for MetricsService service. +type MetricsServiceServer interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(MetricsService_ExportServer) error +} + +// UnimplementedMetricsServiceServer can be embedded to have forward compatible implementations. +type UnimplementedMetricsServiceServer struct { +} + +func (*UnimplementedMetricsServiceServer) Export(MetricsService_ExportServer) error { + return status.Errorf(codes.Unimplemented, "method Export not implemented") +} + +func RegisterMetricsServiceServer(s *grpc.Server, srv MetricsServiceServer) { + s.RegisterService(&_MetricsService_serviceDesc, srv) +} + +func _MetricsService_Export_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(MetricsServiceServer).Export(&metricsServiceExportServer{stream}) +} + +type MetricsService_ExportServer interface { + Send(*ExportMetricsServiceResponse) error + Recv() (*ExportMetricsServiceRequest, error) + grpc.ServerStream +} + +type metricsServiceExportServer struct { + grpc.ServerStream +} + +func (x *metricsServiceExportServer) Send(m *ExportMetricsServiceResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *metricsServiceExportServer) Recv() (*ExportMetricsServiceRequest, error) { + m := new(ExportMetricsServiceRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _MetricsService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "opencensus.proto.agent.metrics.v1.MetricsService", + HandlerType: (*MetricsServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "Export", + Handler: _MetricsService_Export_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "opencensus/proto/agent/metrics/v1/metrics_service.proto", +} diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.gw.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.gw.go index 722e8efd..3cc9bae4 100644 --- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.gw.go +++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.gw.go @@ -13,14 +13,14 @@ import ( "io" "net/http" - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" 
"google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" ) // Suppress "imported and not used" errors @@ -29,7 +29,7 @@ var _ io.Reader var _ status.Status var _ = runtime.String var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage +var _ = metadata.Join func request_MetricsService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client MetricsServiceClient, req *http.Request, pathParams map[string]string) (MetricsService_ExportClient, runtime.ServerMetadata, error) { var metadata runtime.ServerMetadata @@ -55,15 +55,6 @@ func request_MetricsService_Export_0(ctx context.Context, marshaler runtime.Mars } return nil } - if err := handleSend(); err != nil { - if cerr := stream.CloseSend(); cerr != nil { - grpclog.Infof("Failed to terminate client stream: %v", cerr) - } - if err == io.EOF { - return stream, metadata, nil - } - return nil, metadata, err - } go func() { for { if err := handleSend(); err != nil { @@ -86,6 +77,7 @@ func request_MetricsService_Export_0(ctx context.Context, marshaler runtime.Mars // RegisterMetricsServiceHandlerServer registers the http handlers for service MetricsService to "mux". // UnaryRPC :call MetricsServiceServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterMetricsServiceHandlerFromEndpoint instead. func RegisterMetricsServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server MetricsServiceServer) error { mux.Handle("POST", pattern_MetricsService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -140,7 +132,7 @@ func RegisterMetricsServiceHandlerClient(ctx context.Context, mux *runtime.Serve ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req, "/opencensus.proto.agent.metrics.v1.MetricsService/Export", runtime.WithHTTPPathPattern("/v1/metrics")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -160,7 +152,7 @@ func RegisterMetricsServiceHandlerClient(ctx context.Context, mux *runtime.Serve } var ( - pattern_MetricsService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "metrics"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_MetricsService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "metrics"}, "")) ) var ( diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service_grpc.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service_grpc.pb.go deleted file mode 100644 index 0a600256..00000000 --- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service_grpc.pb.go +++ /dev/null @@ -1,126 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. - -package v1 - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. 
-const _ = grpc.SupportPackageIsVersion6 - -// MetricsServiceClient is the client API for MetricsService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type MetricsServiceClient interface { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. - Export(ctx context.Context, opts ...grpc.CallOption) (MetricsService_ExportClient, error) -} - -type metricsServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewMetricsServiceClient(cc grpc.ClientConnInterface) MetricsServiceClient { - return &metricsServiceClient{cc} -} - -func (c *metricsServiceClient) Export(ctx context.Context, opts ...grpc.CallOption) (MetricsService_ExportClient, error) { - stream, err := c.cc.NewStream(ctx, &_MetricsService_serviceDesc.Streams[0], "/opencensus.proto.agent.metrics.v1.MetricsService/Export", opts...) - if err != nil { - return nil, err - } - x := &metricsServiceExportClient{stream} - return x, nil -} - -type MetricsService_ExportClient interface { - Send(*ExportMetricsServiceRequest) error - Recv() (*ExportMetricsServiceResponse, error) - grpc.ClientStream -} - -type metricsServiceExportClient struct { - grpc.ClientStream -} - -func (x *metricsServiceExportClient) Send(m *ExportMetricsServiceRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *metricsServiceExportClient) Recv() (*ExportMetricsServiceResponse, error) { - m := new(ExportMetricsServiceResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// MetricsServiceServer is the server API for MetricsService service. -// All implementations must embed UnimplementedMetricsServiceServer -// for forward compatibility -type MetricsServiceServer interface { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. - Export(MetricsService_ExportServer) error - mustEmbedUnimplementedMetricsServiceServer() -} - -// UnimplementedMetricsServiceServer must be embedded to have forward compatible implementations. 
-type UnimplementedMetricsServiceServer struct { -} - -func (*UnimplementedMetricsServiceServer) Export(MetricsService_ExportServer) error { - return status.Errorf(codes.Unimplemented, "method Export not implemented") -} -func (*UnimplementedMetricsServiceServer) mustEmbedUnimplementedMetricsServiceServer() {} - -func RegisterMetricsServiceServer(s *grpc.Server, srv MetricsServiceServer) { - s.RegisterService(&_MetricsService_serviceDesc, srv) -} - -func _MetricsService_Export_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(MetricsServiceServer).Export(&metricsServiceExportServer{stream}) -} - -type MetricsService_ExportServer interface { - Send(*ExportMetricsServiceResponse) error - Recv() (*ExportMetricsServiceRequest, error) - grpc.ServerStream -} - -type metricsServiceExportServer struct { - grpc.ServerStream -} - -func (x *metricsServiceExportServer) Send(m *ExportMetricsServiceResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *metricsServiceExportServer) Recv() (*ExportMetricsServiceRequest, error) { - m := new(ExportMetricsServiceRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _MetricsService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "opencensus.proto.agent.metrics.v1.MetricsService", - HandlerType: (*MetricsServiceServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "Export", - Handler: _MetricsService_Export_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "opencensus/proto/agent/metrics/v1/metrics_service.proto", -} diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go index 0eed170b..c713bfb4 100644 --- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go +++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 -// protoc v3.12.3 +// protoc-gen-go v1.26.0 +// protoc v3.17.3 // source: opencensus/proto/agent/trace/v1/trace_service.proto // NOTE: This proto is experimental and is subject to change at this point. @@ -24,10 +24,13 @@ package v1 import ( + context "context" v1 "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" v12 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" v11 "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" - proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -41,10 +44,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. 
-const _ = proto.ProtoPackageIsVersion4 - type CurrentLibraryConfig struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -342,7 +341,7 @@ var file_opencensus_proto_agent_trace_v1_trace_service_proto_rawDesc = []byte{ 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0xa5, 0x01, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0xa9, 0x01, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x11, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, @@ -351,9 +350,10 @@ var file_opencensus_proto_agent_trace_v1_trace_service_proto_rawDesc = []byte{ 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, - 0x2f, 0x76, 0x31, 0xea, 0x02, 0x1f, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73, 0x75, 0x73, - 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x54, 0x72, 0x61, - 0x63, 0x65, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x2f, 0x76, 0x31, 0xea, 0x02, 0x23, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73, 0x75, 0x73, + 0x3a, 0x3a, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x3a, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x3a, 0x3a, + 0x54, 0x72, 0x61, 0x63, 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( @@ -472,3 +472,193 @@ func file_opencensus_proto_agent_trace_v1_trace_service_proto_init() { file_opencensus_proto_agent_trace_v1_trace_service_proto_goTypes = nil file_opencensus_proto_agent_trace_v1_trace_service_proto_depIdxs = nil } + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// TraceServiceClient is the client API for TraceService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type TraceServiceClient interface { + // After initialization, this RPC must be kept alive for the entire life of + // the application. The agent pushes configs down to applications via a + // stream. + Config(ctx context.Context, opts ...grpc.CallOption) (TraceService_ConfigClient, error) + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. 
+ Export(ctx context.Context, opts ...grpc.CallOption) (TraceService_ExportClient, error) +} + +type traceServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewTraceServiceClient(cc grpc.ClientConnInterface) TraceServiceClient { + return &traceServiceClient{cc} +} + +func (c *traceServiceClient) Config(ctx context.Context, opts ...grpc.CallOption) (TraceService_ConfigClient, error) { + stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[0], "/opencensus.proto.agent.trace.v1.TraceService/Config", opts...) + if err != nil { + return nil, err + } + x := &traceServiceConfigClient{stream} + return x, nil +} + +type TraceService_ConfigClient interface { + Send(*CurrentLibraryConfig) error + Recv() (*UpdatedLibraryConfig, error) + grpc.ClientStream +} + +type traceServiceConfigClient struct { + grpc.ClientStream +} + +func (x *traceServiceConfigClient) Send(m *CurrentLibraryConfig) error { + return x.ClientStream.SendMsg(m) +} + +func (x *traceServiceConfigClient) Recv() (*UpdatedLibraryConfig, error) { + m := new(UpdatedLibraryConfig) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *traceServiceClient) Export(ctx context.Context, opts ...grpc.CallOption) (TraceService_ExportClient, error) { + stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[1], "/opencensus.proto.agent.trace.v1.TraceService/Export", opts...) + if err != nil { + return nil, err + } + x := &traceServiceExportClient{stream} + return x, nil +} + +type TraceService_ExportClient interface { + Send(*ExportTraceServiceRequest) error + Recv() (*ExportTraceServiceResponse, error) + grpc.ClientStream +} + +type traceServiceExportClient struct { + grpc.ClientStream +} + +func (x *traceServiceExportClient) Send(m *ExportTraceServiceRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *traceServiceExportClient) Recv() (*ExportTraceServiceResponse, error) { + m := new(ExportTraceServiceResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// TraceServiceServer is the server API for TraceService service. +type TraceServiceServer interface { + // After initialization, this RPC must be kept alive for the entire life of + // the application. The agent pushes configs down to applications via a + // stream. + Config(TraceService_ConfigServer) error + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(TraceService_ExportServer) error +} + +// UnimplementedTraceServiceServer can be embedded to have forward compatible implementations. 
+type UnimplementedTraceServiceServer struct { +} + +func (*UnimplementedTraceServiceServer) Config(TraceService_ConfigServer) error { + return status.Errorf(codes.Unimplemented, "method Config not implemented") +} +func (*UnimplementedTraceServiceServer) Export(TraceService_ExportServer) error { + return status.Errorf(codes.Unimplemented, "method Export not implemented") +} + +func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) { + s.RegisterService(&_TraceService_serviceDesc, srv) +} + +func _TraceService_Config_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TraceServiceServer).Config(&traceServiceConfigServer{stream}) +} + +type TraceService_ConfigServer interface { + Send(*UpdatedLibraryConfig) error + Recv() (*CurrentLibraryConfig, error) + grpc.ServerStream +} + +type traceServiceConfigServer struct { + grpc.ServerStream +} + +func (x *traceServiceConfigServer) Send(m *UpdatedLibraryConfig) error { + return x.ServerStream.SendMsg(m) +} + +func (x *traceServiceConfigServer) Recv() (*CurrentLibraryConfig, error) { + m := new(CurrentLibraryConfig) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _TraceService_Export_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TraceServiceServer).Export(&traceServiceExportServer{stream}) +} + +type TraceService_ExportServer interface { + Send(*ExportTraceServiceResponse) error + Recv() (*ExportTraceServiceRequest, error) + grpc.ServerStream +} + +type traceServiceExportServer struct { + grpc.ServerStream +} + +func (x *traceServiceExportServer) Send(m *ExportTraceServiceResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *traceServiceExportServer) Recv() (*ExportTraceServiceRequest, error) { + m := new(ExportTraceServiceRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _TraceService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "opencensus.proto.agent.trace.v1.TraceService", + HandlerType: (*TraceServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "Config", + Handler: _TraceService_Config_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "Export", + Handler: _TraceService_Export_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "opencensus/proto/agent/trace/v1/trace_service.proto", +} diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.gw.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.gw.go index 4733c404..7808e9fb 100644 --- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.gw.go +++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.gw.go @@ -13,14 +13,14 @@ import ( "io" "net/http" - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" ) // Suppress "imported and not used" errors @@ -29,7 +29,7 @@ var _ io.Reader var _ 
status.Status var _ = runtime.String var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage +var _ = metadata.Join func request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client TraceServiceClient, req *http.Request, pathParams map[string]string) (TraceService_ExportClient, runtime.ServerMetadata, error) { var metadata runtime.ServerMetadata @@ -55,15 +55,6 @@ func request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marsha } return nil } - if err := handleSend(); err != nil { - if cerr := stream.CloseSend(); cerr != nil { - grpclog.Infof("Failed to terminate client stream: %v", cerr) - } - if err == io.EOF { - return stream, metadata, nil - } - return nil, metadata, err - } go func() { for { if err := handleSend(); err != nil { @@ -86,6 +77,7 @@ func request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marsha // RegisterTraceServiceHandlerServer registers the http handlers for service TraceService to "mux". // UnaryRPC :call TraceServiceServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterTraceServiceHandlerFromEndpoint instead. func RegisterTraceServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server TraceServiceServer) error { mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -140,7 +132,7 @@ func RegisterTraceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMu ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req, "/opencensus.proto.agent.trace.v1.TraceService/Export", runtime.WithHTTPPathPattern("/v1/trace")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -160,7 +152,7 @@ func RegisterTraceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMu } var ( - pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "trace"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "trace"}, "")) ) var ( diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service_grpc.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service_grpc.pb.go deleted file mode 100644 index 90bda0aa..00000000 --- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service_grpc.pb.go +++ /dev/null @@ -1,200 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. - -package v1 - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion6 - -// TraceServiceClient is the client API for TraceService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
-type TraceServiceClient interface { - // After initialization, this RPC must be kept alive for the entire life of - // the application. The agent pushes configs down to applications via a - // stream. - Config(ctx context.Context, opts ...grpc.CallOption) (TraceService_ConfigClient, error) - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. - Export(ctx context.Context, opts ...grpc.CallOption) (TraceService_ExportClient, error) -} - -type traceServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewTraceServiceClient(cc grpc.ClientConnInterface) TraceServiceClient { - return &traceServiceClient{cc} -} - -func (c *traceServiceClient) Config(ctx context.Context, opts ...grpc.CallOption) (TraceService_ConfigClient, error) { - stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[0], "/opencensus.proto.agent.trace.v1.TraceService/Config", opts...) - if err != nil { - return nil, err - } - x := &traceServiceConfigClient{stream} - return x, nil -} - -type TraceService_ConfigClient interface { - Send(*CurrentLibraryConfig) error - Recv() (*UpdatedLibraryConfig, error) - grpc.ClientStream -} - -type traceServiceConfigClient struct { - grpc.ClientStream -} - -func (x *traceServiceConfigClient) Send(m *CurrentLibraryConfig) error { - return x.ClientStream.SendMsg(m) -} - -func (x *traceServiceConfigClient) Recv() (*UpdatedLibraryConfig, error) { - m := new(UpdatedLibraryConfig) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *traceServiceClient) Export(ctx context.Context, opts ...grpc.CallOption) (TraceService_ExportClient, error) { - stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[1], "/opencensus.proto.agent.trace.v1.TraceService/Export", opts...) - if err != nil { - return nil, err - } - x := &traceServiceExportClient{stream} - return x, nil -} - -type TraceService_ExportClient interface { - Send(*ExportTraceServiceRequest) error - Recv() (*ExportTraceServiceResponse, error) - grpc.ClientStream -} - -type traceServiceExportClient struct { - grpc.ClientStream -} - -func (x *traceServiceExportClient) Send(m *ExportTraceServiceRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *traceServiceExportClient) Recv() (*ExportTraceServiceResponse, error) { - m := new(ExportTraceServiceResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// TraceServiceServer is the server API for TraceService service. -// All implementations must embed UnimplementedTraceServiceServer -// for forward compatibility -type TraceServiceServer interface { - // After initialization, this RPC must be kept alive for the entire life of - // the application. The agent pushes configs down to applications via a - // stream. - Config(TraceService_ConfigServer) error - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. - Export(TraceService_ExportServer) error - mustEmbedUnimplementedTraceServiceServer() -} - -// UnimplementedTraceServiceServer must be embedded to have forward compatible implementations. 
-type UnimplementedTraceServiceServer struct { -} - -func (*UnimplementedTraceServiceServer) Config(TraceService_ConfigServer) error { - return status.Errorf(codes.Unimplemented, "method Config not implemented") -} -func (*UnimplementedTraceServiceServer) Export(TraceService_ExportServer) error { - return status.Errorf(codes.Unimplemented, "method Export not implemented") -} -func (*UnimplementedTraceServiceServer) mustEmbedUnimplementedTraceServiceServer() {} - -func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) { - s.RegisterService(&_TraceService_serviceDesc, srv) -} - -func _TraceService_Config_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(TraceServiceServer).Config(&traceServiceConfigServer{stream}) -} - -type TraceService_ConfigServer interface { - Send(*UpdatedLibraryConfig) error - Recv() (*CurrentLibraryConfig, error) - grpc.ServerStream -} - -type traceServiceConfigServer struct { - grpc.ServerStream -} - -func (x *traceServiceConfigServer) Send(m *UpdatedLibraryConfig) error { - return x.ServerStream.SendMsg(m) -} - -func (x *traceServiceConfigServer) Recv() (*CurrentLibraryConfig, error) { - m := new(CurrentLibraryConfig) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _TraceService_Export_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(TraceServiceServer).Export(&traceServiceExportServer{stream}) -} - -type TraceService_ExportServer interface { - Send(*ExportTraceServiceResponse) error - Recv() (*ExportTraceServiceRequest, error) - grpc.ServerStream -} - -type traceServiceExportServer struct { - grpc.ServerStream -} - -func (x *traceServiceExportServer) Send(m *ExportTraceServiceResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *traceServiceExportServer) Recv() (*ExportTraceServiceRequest, error) { - m := new(ExportTraceServiceRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _TraceService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "opencensus.proto.agent.trace.v1.TraceService", - HandlerType: (*TraceServiceServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "Config", - Handler: _TraceService_Config_Handler, - ServerStreams: true, - ClientStreams: true, - }, - { - StreamName: "Export", - Handler: _TraceService_Export_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "opencensus/proto/agent/trace/v1/trace_service.proto", -} diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go index 30f75832..0cac88b2 100644 --- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go +++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go @@ -19,19 +19,18 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.25.0 -// protoc v3.12.3 +// protoc-gen-go v1.26.0 +// protoc v3.17.3 // source: opencensus/proto/metrics/v1/metrics.proto package v1 import ( v1 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" - proto "github.com/golang/protobuf/proto" - timestamp "github.com/golang/protobuf/ptypes/timestamp" - wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" ) @@ -43,10 +42,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // The kind of metric. It describes how the data is reported. // // A gauge is an instantaneous measurement of a value. @@ -364,7 +359,7 @@ type TimeSeries struct { // was reset to zero. Exclusive. The cumulative value is over the time interval // (start_timestamp, timestamp]. If not specified, the backend can use the // previous recorded value. - StartTimestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"` + StartTimestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"` // The set of label values that uniquely identify this timeseries. Applies to // all points. The order of label values must match that of label keys in the // metric descriptor. @@ -406,7 +401,7 @@ func (*TimeSeries) Descriptor() ([]byte, []int) { return file_opencensus_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{3} } -func (x *TimeSeries) GetStartTimestamp() *timestamp.Timestamp { +func (x *TimeSeries) GetStartTimestamp() *timestamppb.Timestamp { if x != nil { return x.StartTimestamp } @@ -493,7 +488,7 @@ type Point struct { // The moment when this point was recorded. Inclusive. // If not specified, the timestamp will be decided by the backend. - Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` // The actual point value. // // Types that are assignable to Value: @@ -536,7 +531,7 @@ func (*Point) Descriptor() ([]byte, []int) { return file_opencensus_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{5} } -func (x *Point) GetTimestamp() *timestamp.Timestamp { +func (x *Point) GetTimestamp() *timestamppb.Timestamp { if x != nil { return x.Timestamp } @@ -721,11 +716,11 @@ type SummaryValue struct { // The total number of recorded values since start_time. Optional since // some systems don't expose this. - Count *wrappers.Int64Value `protobuf:"bytes,1,opt,name=count,proto3" json:"count,omitempty"` + Count *wrapperspb.Int64Value `protobuf:"bytes,1,opt,name=count,proto3" json:"count,omitempty"` // The total sum of recorded values since start_time. Optional since some // systems don't expose this. If count is zero then this field must be zero. // This field must be unset if the sum is not available. 
- Sum *wrappers.DoubleValue `protobuf:"bytes,2,opt,name=sum,proto3" json:"sum,omitempty"` + Sum *wrapperspb.DoubleValue `protobuf:"bytes,2,opt,name=sum,proto3" json:"sum,omitempty"` // Values calculated over an arbitrary time window. Snapshot *SummaryValue_Snapshot `protobuf:"bytes,3,opt,name=snapshot,proto3" json:"snapshot,omitempty"` } @@ -762,14 +757,14 @@ func (*SummaryValue) Descriptor() ([]byte, []int) { return file_opencensus_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{7} } -func (x *SummaryValue) GetCount() *wrappers.Int64Value { +func (x *SummaryValue) GetCount() *wrapperspb.Int64Value { if x != nil { return x.Count } return nil } -func (x *SummaryValue) GetSum() *wrappers.DoubleValue { +func (x *SummaryValue) GetSum() *wrapperspb.DoubleValue { if x != nil { return x.Sum } @@ -926,7 +921,7 @@ type DistributionValue_Exemplar struct { // belongs to. Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` // The observation (sampling) time of the above value. - Timestamp *timestamp.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Timestamp *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` // Contextual information about the example value. Attachments map[string]string `protobuf:"bytes,3,rep,name=attachments,proto3" json:"attachments,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } @@ -970,7 +965,7 @@ func (x *DistributionValue_Exemplar) GetValue() float64 { return 0 } -func (x *DistributionValue_Exemplar) GetTimestamp() *timestamp.Timestamp { +func (x *DistributionValue_Exemplar) GetTimestamp() *timestamppb.Timestamp { if x != nil { return x.Timestamp } @@ -1048,11 +1043,11 @@ type SummaryValue_Snapshot struct { // The number of values in the snapshot. Optional since some systems don't // expose this. - Count *wrappers.Int64Value `protobuf:"bytes,1,opt,name=count,proto3" json:"count,omitempty"` + Count *wrapperspb.Int64Value `protobuf:"bytes,1,opt,name=count,proto3" json:"count,omitempty"` // The sum of values in the snapshot. Optional since some systems don't // expose this. If count is zero then this field must be zero or not set // (if not supported). - Sum *wrappers.DoubleValue `protobuf:"bytes,2,opt,name=sum,proto3" json:"sum,omitempty"` + Sum *wrapperspb.DoubleValue `protobuf:"bytes,2,opt,name=sum,proto3" json:"sum,omitempty"` // A list of values at different percentiles of the distribution calculated // from the current snapshot. The percentiles must be strictly increasing. 
PercentileValues []*SummaryValue_Snapshot_ValueAtPercentile `protobuf:"bytes,3,rep,name=percentile_values,json=percentileValues,proto3" json:"percentile_values,omitempty"` @@ -1090,14 +1085,14 @@ func (*SummaryValue_Snapshot) Descriptor() ([]byte, []int) { return file_opencensus_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{7, 0} } -func (x *SummaryValue_Snapshot) GetCount() *wrappers.Int64Value { +func (x *SummaryValue_Snapshot) GetCount() *wrapperspb.Int64Value { if x != nil { return x.Count } return nil } -func (x *SummaryValue_Snapshot) GetSum() *wrappers.DoubleValue { +func (x *SummaryValue_Snapshot) GetSum() *wrapperspb.DoubleValue { if x != nil { return x.Sum } @@ -1351,7 +1346,7 @@ var file_opencensus_proto_metrics_v1_metrics_proto_rawDesc = []byte{ 0x6c, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x94, 0x01, 0x0a, 0x1e, 0x69, 0x6f, 0x2e, + 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x97, 0x01, 0x0a, 0x1e, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x69, 0x74, @@ -1359,9 +1354,9 @@ var file_opencensus_proto_metrics_v1_metrics_proto_rawDesc = []byte{ 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2f, 0x76, - 0x31, 0xea, 0x02, 0x1b, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x56, 0x31, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x31, 0xea, 0x02, 0x1e, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x3a, 0x3a, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x3a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x3a, 0x3a, + 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1396,9 +1391,9 @@ var file_opencensus_proto_metrics_v1_metrics_proto_goTypes = []interface{}{ (*SummaryValue_Snapshot)(nil), // 14: opencensus.proto.metrics.v1.SummaryValue.Snapshot (*SummaryValue_Snapshot_ValueAtPercentile)(nil), // 15: opencensus.proto.metrics.v1.SummaryValue.Snapshot.ValueAtPercentile (*v1.Resource)(nil), // 16: opencensus.proto.resource.v1.Resource - (*timestamp.Timestamp)(nil), // 17: google.protobuf.Timestamp - (*wrappers.Int64Value)(nil), // 18: google.protobuf.Int64Value - (*wrappers.DoubleValue)(nil), // 19: google.protobuf.DoubleValue + (*timestamppb.Timestamp)(nil), // 17: google.protobuf.Timestamp + (*wrapperspb.Int64Value)(nil), // 18: google.protobuf.Int64Value + (*wrapperspb.DoubleValue)(nil), // 19: google.protobuf.DoubleValue } var file_opencensus_proto_metrics_v1_metrics_proto_depIdxs = []int32{ 2, // 0: opencensus.proto.metrics.v1.Metric.metric_descriptor:type_name -> opencensus.proto.metrics.v1.MetricDescriptor diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go 
b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go index 692746c0..194dd70d 100644 --- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go +++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 -// protoc v3.12.3 +// protoc-gen-go v1.26.0 +// protoc v3.17.3 // source: opencensus/proto/resource/v1/resource.proto package v1 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -35,10 +34,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // Resource information. type Resource struct { state protoimpl.MessageState @@ -115,7 +110,7 @@ var file_opencensus_proto_resource_v1_resource_proto_rawDesc = []byte{ 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x42, 0x98, 0x01, 0x0a, 0x1f, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, + 0x02, 0x38, 0x01, 0x42, 0x9b, 0x01, 0x0a, 0x1f, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x45, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, @@ -123,9 +118,9 @@ var file_opencensus_proto_resource_v1_resource_proto_rawDesc = []byte{ 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76, 0x31, 0xea, - 0x02, 0x1c, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x56, 0x31, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x02, 0x1f, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x3a, 0x3a, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x3a, 0x3a, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3a, 0x3a, 0x56, + 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go index 66664fe1..d35612ca 100644 --- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go +++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go @@ -14,19 +14,18 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.25.0 -// protoc v3.12.3 +// protoc-gen-go v1.26.0 +// protoc v3.17.3 // source: opencensus/proto/trace/v1/trace.proto package v1 import ( v1 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" - proto "github.com/golang/protobuf/proto" - timestamp "github.com/golang/protobuf/ptypes/timestamp" - wrappers "github.com/golang/protobuf/ptypes/wrappers" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" ) @@ -38,10 +37,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // Type of span. Can be used to specify additional relationships between spans // in addition to a parent/child relationship. type Span_SpanKind int32 @@ -272,7 +267,7 @@ type Span struct { // keep end_time > start_time for consistency. // // This field is required. - StartTime *timestamp.Timestamp `protobuf:"bytes,5,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + StartTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` // The end time of the span. On the client side, this is the time kept by // the local machine where the span execution ends. On the server side, this // is the time when the server application handler stops running. @@ -282,7 +277,7 @@ type Span struct { // keep end_time > start_time for consistency. // // This field is required. - EndTime *timestamp.Timestamp `protobuf:"bytes,6,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` + EndTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` // A set of attributes on the span. Attributes *Span_Attributes `protobuf:"bytes,7,opt,name=attributes,proto3" json:"attributes,omitempty"` // A stack trace captured at the start of the span. @@ -304,10 +299,10 @@ type Span struct { // to the same process as the current span. This flag is most commonly // used to indicate the need to adjust time as clocks in different // processes may not be synchronized. - SameProcessAsParentSpan *wrappers.BoolValue `protobuf:"bytes,12,opt,name=same_process_as_parent_span,json=sameProcessAsParentSpan,proto3" json:"same_process_as_parent_span,omitempty"` + SameProcessAsParentSpan *wrapperspb.BoolValue `protobuf:"bytes,12,opt,name=same_process_as_parent_span,json=sameProcessAsParentSpan,proto3" json:"same_process_as_parent_span,omitempty"` // An optional number of child spans that were generated while this span // was active. If set, allows an implementation to detect missing child spans. 
- ChildSpanCount *wrappers.UInt32Value `protobuf:"bytes,13,opt,name=child_span_count,json=childSpanCount,proto3" json:"child_span_count,omitempty"` + ChildSpanCount *wrapperspb.UInt32Value `protobuf:"bytes,13,opt,name=child_span_count,json=childSpanCount,proto3" json:"child_span_count,omitempty"` } func (x *Span) Reset() { @@ -384,14 +379,14 @@ func (x *Span) GetKind() Span_SpanKind { return Span_SPAN_KIND_UNSPECIFIED } -func (x *Span) GetStartTime() *timestamp.Timestamp { +func (x *Span) GetStartTime() *timestamppb.Timestamp { if x != nil { return x.StartTime } return nil } -func (x *Span) GetEndTime() *timestamp.Timestamp { +func (x *Span) GetEndTime() *timestamppb.Timestamp { if x != nil { return x.EndTime } @@ -440,14 +435,14 @@ func (x *Span) GetResource() *v1.Resource { return nil } -func (x *Span) GetSameProcessAsParentSpan() *wrappers.BoolValue { +func (x *Span) GetSameProcessAsParentSpan() *wrapperspb.BoolValue { if x != nil { return x.SameProcessAsParentSpan } return nil } -func (x *Span) GetChildSpanCount() *wrappers.UInt32Value { +func (x *Span) GetChildSpanCount() *wrapperspb.UInt32Value { if x != nil { return x.ChildSpanCount } @@ -952,7 +947,7 @@ type Span_TimeEvent struct { unknownFields protoimpl.UnknownFields // The time the event occurred. - Time *timestamp.Timestamp `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"` + Time *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"` // A `TimeEvent` can contain either an `Annotation` object or a // `MessageEvent` object, but not both. // @@ -994,7 +989,7 @@ func (*Span_TimeEvent) Descriptor() ([]byte, []int) { return file_opencensus_proto_trace_v1_trace_proto_rawDescGZIP(), []int{0, 2} } -func (x *Span_TimeEvent) GetTime() *timestamp.Timestamp { +func (x *Span_TimeEvent) GetTime() *timestamppb.Timestamp { if x != nil { return x.Time } @@ -1899,16 +1894,16 @@ var file_opencensus_proto_trace_v1_trace_proto_rawDesc = []byte{ 0x6c, 0x75, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x12, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x42, 0x79, 0x74, 0x65, - 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x8c, 0x01, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x8f, 0x01, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x69, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, - 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0xea, 0x02, 0x19, 0x4f, 0x70, 0x65, 0x6e, 0x43, - 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x72, 0x61, 0x63, - 0x65, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0xea, 0x02, 0x1c, 0x4f, 0x70, 0x65, 0x6e, 0x43, + 0x65, 0x6e, 0x73, 0x75, 0x73, 0x3a, 0x3a, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x3a, 0x54, 0x72, + 0x61, 0x63, 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 
0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1947,10 +1942,10 @@ var file_opencensus_proto_trace_v1_trace_proto_goTypes = []interface{}{ (*Span_TimeEvent_MessageEvent)(nil), // 18: opencensus.proto.trace.v1.Span.TimeEvent.MessageEvent (*StackTrace_StackFrame)(nil), // 19: opencensus.proto.trace.v1.StackTrace.StackFrame (*StackTrace_StackFrames)(nil), // 20: opencensus.proto.trace.v1.StackTrace.StackFrames - (*timestamp.Timestamp)(nil), // 21: google.protobuf.Timestamp + (*timestamppb.Timestamp)(nil), // 21: google.protobuf.Timestamp (*v1.Resource)(nil), // 22: opencensus.proto.resource.v1.Resource - (*wrappers.BoolValue)(nil), // 23: google.protobuf.BoolValue - (*wrappers.UInt32Value)(nil), // 24: google.protobuf.UInt32Value + (*wrapperspb.BoolValue)(nil), // 23: google.protobuf.BoolValue + (*wrapperspb.UInt32Value)(nil), // 24: google.protobuf.UInt32Value } var file_opencensus_proto_trace_v1_trace_proto_depIdxs = []int32{ 9, // 0: opencensus.proto.trace.v1.Span.tracestate:type_name -> opencensus.proto.trace.v1.Span.Tracestate diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go index 0c19a4d2..ee62b2e3 100644 --- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go +++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 -// protoc v3.12.3 +// protoc-gen-go v1.26.0 +// protoc v3.17.3 // source: opencensus/proto/trace/v1/trace_config.proto package v1 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -35,10 +34,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. 
-const _ = proto.ProtoPackageIsVersion4 - // How spans should be sampled: // - Always off // - Always on @@ -432,7 +427,7 @@ var file_opencensus_proto_trace_v1_trace_config_proto_rawDesc = []byte{ 0x12, 0x11, 0x0a, 0x0d, 0x41, 0x4c, 0x57, 0x41, 0x59, 0x53, 0x5f, 0x50, 0x41, 0x52, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x22, 0x27, 0x0a, 0x13, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x71, 0x70, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x71, 0x70, 0x73, 0x42, 0x92, 0x01, 0x0a, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x71, 0x70, 0x73, 0x42, 0x95, 0x01, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x10, 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, @@ -440,9 +435,9 @@ var file_opencensus_proto_trace_v1_trace_config_proto_rawDesc = []byte{ 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x69, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x74, 0x72, 0x61, - 0x63, 0x65, 0x2f, 0x76, 0x31, 0xea, 0x02, 0x19, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73, - 0x75, 0x73, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x56, - 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x63, 0x65, 0x2f, 0x76, 0x31, 0xea, 0x02, 0x1c, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73, + 0x75, 0x73, 0x3a, 0x3a, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x3a, 0x54, 0x72, 0x61, 0x63, 0x65, + 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md index 792b4a60..8bf0e5b7 100644 --- a/vendor/github.com/cespare/xxhash/v2/README.md +++ b/vendor/github.com/cespare/xxhash/v2/README.md @@ -3,8 +3,7 @@ [![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2) [![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml) -xxhash is a Go implementation of the 64-bit -[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a high-quality hashing algorithm that is much faster than anything in the Go standard library. @@ -25,8 +24,11 @@ func (*Digest) WriteString(string) (int, error) func (*Digest) Sum64() uint64 ``` -This implementation provides a fast pure-Go implementation and an even faster -assembly implementation for amd64. +The package is written with optimized pure Go and also contains even faster +assembly implementations for amd64 and arm64. If desired, the `purego` build tag +opts into using the Go code even on those architectures. + +[xxHash]: http://cyan4973.github.io/xxHash/ ## Compatibility @@ -45,19 +47,20 @@ I recommend using the latest release of Go. Here are some quick benchmarks comparing the pure-Go and assembly implementations of Sum64. 
-| input size | purego | asm | -| --- | --- | --- | -| 5 B | 979.66 MB/s | 1291.17 MB/s | -| 100 B | 7475.26 MB/s | 7973.40 MB/s | -| 4 KB | 17573.46 MB/s | 17602.65 MB/s | -| 10 MB | 17131.46 MB/s | 17142.16 MB/s | +| input size | purego | asm | +| ---------- | --------- | --------- | +| 4 B | 1.3 GB/s | 1.2 GB/s | +| 16 B | 2.9 GB/s | 3.5 GB/s | +| 100 B | 6.9 GB/s | 8.1 GB/s | +| 4 KB | 11.7 GB/s | 16.7 GB/s | +| 10 MB | 12.0 GB/s | 17.3 GB/s | -These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using -the following commands under Go 1.11.2: +These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C +CPU using the following commands under Go 1.19.2: ``` -$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' -$ go test -benchtime 10s -bench '/xxhash,direct,bytes' +benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') +benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') ``` ## Projects using this package diff --git a/vendor/github.com/cespare/xxhash/v2/testall.sh b/vendor/github.com/cespare/xxhash/v2/testall.sh new file mode 100644 index 00000000..94b9c443 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/testall.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -eu -o pipefail + +# Small convenience script for running the tests with various combinations of +# arch/tags. This assumes we're running on amd64 and have qemu available. + +go test ./... +go test -tags purego ./... +GOARCH=arm64 go test +GOARCH=arm64 go test -tags purego diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go index 15c835d5..a9e0d45c 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -16,19 +16,11 @@ const ( prime5 uint64 = 2870177450012600261 ) -// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where -// possible in the Go code is worth a small (but measurable) performance boost -// by avoiding some MOVQs. Vars are needed for the asm and also are useful for -// convenience in the Go code in a few places where we need to intentionally -// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the -// result overflows a uint64). -var ( - prime1v = prime1 - prime2v = prime2 - prime3v = prime3 - prime4v = prime4 - prime5v = prime5 -) +// Store the primes in an array as well. +// +// The consts are used when possible in Go code to avoid MOVs but we need a +// contiguous array of the assembly code. +var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} // Digest implements hash.Hash64. type Digest struct { @@ -50,10 +42,10 @@ func New() *Digest { // Reset clears the Digest's state so that it can be reused. func (d *Digest) Reset() { - d.v1 = prime1v + prime2 + d.v1 = primes[0] + prime2 d.v2 = prime2 d.v3 = 0 - d.v4 = -prime1v + d.v4 = -primes[0] d.total = 0 d.n = 0 } @@ -69,21 +61,23 @@ func (d *Digest) Write(b []byte) (n int, err error) { n = len(b) d.total += uint64(n) + memleft := d.mem[d.n&(len(d.mem)-1):] + if d.n+n < 32 { // This new data doesn't even fill the current block. - copy(d.mem[d.n:], b) + copy(memleft, b) d.n += n return } if d.n > 0 { // Finish off the partial block. 
- copy(d.mem[d.n:], b) + c := copy(memleft, b) d.v1 = round(d.v1, u64(d.mem[0:8])) d.v2 = round(d.v2, u64(d.mem[8:16])) d.v3 = round(d.v3, u64(d.mem[16:24])) d.v4 = round(d.v4, u64(d.mem[24:32])) - b = b[32-d.n:] + b = b[c:] d.n = 0 } @@ -133,21 +127,20 @@ func (d *Digest) Sum64() uint64 { h += d.total - i, end := 0, d.n - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(d.mem[i:i+8])) + b := d.mem[:d.n&(len(d.mem)-1)] + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } - if i+4 <= end { - h ^= uint64(u32(d.mem[i:i+4])) * prime1 + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 - i += 4 + b = b[4:] } - for i < end { - h ^= uint64(d.mem[i]) * prime5 + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 - i++ } h ^= h >> 33 diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s index be8db5bf..3e8b1325 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s @@ -1,215 +1,209 @@ +//go:build !appengine && gc && !purego // +build !appengine // +build gc // +build !purego #include "textflag.h" -// Register allocation: -// AX h -// SI pointer to advance through b -// DX n -// BX loop end -// R8 v1, k1 -// R9 v2 -// R10 v3 -// R11 v4 -// R12 tmp -// R13 prime1v -// R14 prime2v -// DI prime4v - -// round reads from and advances the buffer pointer in SI. -// It assumes that R13 has prime1v and R14 has prime2v. -#define round(r) \ - MOVQ (SI), R12 \ - ADDQ $8, SI \ - IMULQ R14, R12 \ - ADDQ R12, r \ - ROLQ $31, r \ - IMULQ R13, r - -// mergeRound applies a merge round on the two registers acc and val. -// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v. -#define mergeRound(acc, val) \ - IMULQ R14, val \ - ROLQ $31, val \ - IMULQ R13, val \ - XORQ val, acc \ - IMULQ R13, acc \ - ADDQ DI, acc +// Registers: +#define h AX +#define d AX +#define p SI // pointer to advance through b +#define n DX +#define end BX // loop end +#define v1 R8 +#define v2 R9 +#define v3 R10 +#define v4 R11 +#define x R12 +#define prime1 R13 +#define prime2 R14 +#define prime4 DI + +#define round(acc, x) \ + IMULQ prime2, x \ + ADDQ x, acc \ + ROLQ $31, acc \ + IMULQ prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + IMULQ prime2, x \ + ROLQ $31, x \ + IMULQ prime1, x + +// mergeRound applies a merge round on the two registers acc and x. +// It assumes that prime1, prime2, and prime4 have been loaded. +#define mergeRound(acc, x) \ + round0(x) \ + XORQ x, acc \ + IMULQ prime1, acc \ + ADDQ prime4, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that there is at least one block +// to process. +#define blockLoop() \ +loop: \ + MOVQ +0(p), x \ + round(v1, x) \ + MOVQ +8(p), x \ + round(v2, x) \ + MOVQ +16(p), x \ + round(v3, x) \ + MOVQ +24(p), x \ + round(v4, x) \ + ADDQ $32, p \ + CMPQ p, end \ + JLE loop // func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT, $0-32 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 // Load fixed primes. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), DI + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + MOVQ ·primes+24(SB), prime4 // Load slice. - MOVQ b_base+0(FP), SI - MOVQ b_len+8(FP), DX - LEAQ (SI)(DX*1), BX + MOVQ b_base+0(FP), p + MOVQ b_len+8(FP), n + LEAQ (p)(n*1), end // The first loop limit will be len(b)-32. 
- SUBQ $32, BX + SUBQ $32, end // Check whether we have at least one block. - CMPQ DX, $32 + CMPQ n, $32 JLT noBlocks // Set up initial state (v1, v2, v3, v4). - MOVQ R13, R8 - ADDQ R14, R8 - MOVQ R14, R9 - XORQ R10, R10 - XORQ R11, R11 - SUBQ R13, R11 - - // Loop until SI > BX. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ SI, BX - JLE blockLoop - - MOVQ R8, AX - ROLQ $1, AX - MOVQ R9, R12 - ROLQ $7, R12 - ADDQ R12, AX - MOVQ R10, R12 - ROLQ $12, R12 - ADDQ R12, AX - MOVQ R11, R12 - ROLQ $18, R12 - ADDQ R12, AX - - mergeRound(AX, R8) - mergeRound(AX, R9) - mergeRound(AX, R10) - mergeRound(AX, R11) + MOVQ prime1, v1 + ADDQ prime2, v1 + MOVQ prime2, v2 + XORQ v3, v3 + XORQ v4, v4 + SUBQ prime1, v4 + + blockLoop() + + MOVQ v1, h + ROLQ $1, h + MOVQ v2, x + ROLQ $7, x + ADDQ x, h + MOVQ v3, x + ROLQ $12, x + ADDQ x, h + MOVQ v4, x + ROLQ $18, x + ADDQ x, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) JMP afterBlocks noBlocks: - MOVQ ·prime5v(SB), AX + MOVQ ·primes+32(SB), h afterBlocks: - ADDQ DX, AX - - // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8. - ADDQ $24, BX - - CMPQ SI, BX - JG fourByte - -wordLoop: - // Calculate k1. - MOVQ (SI), R8 - ADDQ $8, SI - IMULQ R14, R8 - ROLQ $31, R8 - IMULQ R13, R8 - - XORQ R8, AX - ROLQ $27, AX - IMULQ R13, AX - ADDQ DI, AX - - CMPQ SI, BX - JLE wordLoop - -fourByte: - ADDQ $4, BX - CMPQ SI, BX - JG singles - - MOVL (SI), R8 - ADDQ $4, SI - IMULQ R13, R8 - XORQ R8, AX - - ROLQ $23, AX - IMULQ R14, AX - ADDQ ·prime3v(SB), AX - -singles: - ADDQ $4, BX - CMPQ SI, BX + ADDQ n, h + + ADDQ $24, end + CMPQ p, end + JG try4 + +loop8: + MOVQ (p), x + ADDQ $8, p + round0(x) + XORQ x, h + ROLQ $27, h + IMULQ prime1, h + ADDQ prime4, h + + CMPQ p, end + JLE loop8 + +try4: + ADDQ $4, end + CMPQ p, end + JG try1 + + MOVL (p), x + ADDQ $4, p + IMULQ prime1, x + XORQ x, h + + ROLQ $23, h + IMULQ prime2, h + ADDQ ·primes+16(SB), h + +try1: + ADDQ $4, end + CMPQ p, end JGE finalize -singlesLoop: - MOVBQZX (SI), R12 - ADDQ $1, SI - IMULQ ·prime5v(SB), R12 - XORQ R12, AX +loop1: + MOVBQZX (p), x + ADDQ $1, p + IMULQ ·primes+32(SB), x + XORQ x, h + ROLQ $11, h + IMULQ prime1, h - ROLQ $11, AX - IMULQ R13, AX - - CMPQ SI, BX - JL singlesLoop + CMPQ p, end + JL loop1 finalize: - MOVQ AX, R12 - SHRQ $33, R12 - XORQ R12, AX - IMULQ R14, AX - MOVQ AX, R12 - SHRQ $29, R12 - XORQ R12, AX - IMULQ ·prime3v(SB), AX - MOVQ AX, R12 - SHRQ $32, R12 - XORQ R12, AX - - MOVQ AX, ret+24(FP) + MOVQ h, x + SHRQ $33, x + XORQ x, h + IMULQ prime2, h + MOVQ h, x + SHRQ $29, x + XORQ x, h + IMULQ ·primes+16(SB), h + MOVQ h, x + SHRQ $32, x + XORQ x, h + + MOVQ h, ret+24(FP) RET -// writeBlocks uses the same registers as above except that it uses AX to store -// the d pointer. - // func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT, $0-40 +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 // Load fixed primes needed for round. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 // Load slice. - MOVQ b_base+8(FP), SI - MOVQ b_len+16(FP), DX - LEAQ (SI)(DX*1), BX - SUBQ $32, BX + MOVQ b_base+8(FP), p + MOVQ b_len+16(FP), n + LEAQ (p)(n*1), end + SUBQ $32, end // Load vN from d. 
- MOVQ d+0(FP), AX - MOVQ 0(AX), R8 // v1 - MOVQ 8(AX), R9 // v2 - MOVQ 16(AX), R10 // v3 - MOVQ 24(AX), R11 // v4 + MOVQ s+0(FP), d + MOVQ 0(d), v1 + MOVQ 8(d), v2 + MOVQ 16(d), v3 + MOVQ 24(d), v4 // We don't need to check the loop condition here; this function is // always called with at least one block of data to process. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ SI, BX - JLE blockLoop + blockLoop() // Copy vN back to d. - MOVQ R8, 0(AX) - MOVQ R9, 8(AX) - MOVQ R10, 16(AX) - MOVQ R11, 24(AX) - - // The number of bytes written is SI minus the old base pointer. - SUBQ b_base+8(FP), SI - MOVQ SI, ret+32(FP) + MOVQ v1, 0(d) + MOVQ v2, 8(d) + MOVQ v3, 16(d) + MOVQ v4, 24(d) + + // The number of bytes written is p minus the old base pointer. + SUBQ b_base+8(FP), p + MOVQ p, ret+32(FP) RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s new file mode 100644 index 00000000..7e3145a2 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s @@ -0,0 +1,183 @@ +//go:build !appengine && gc && !purego +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Registers: +#define digest R1 +#define h R2 // return value +#define p R3 // input pointer +#define n R4 // input length +#define nblocks R5 // n / 32 +#define prime1 R7 +#define prime2 R8 +#define prime3 R9 +#define prime4 R10 +#define prime5 R11 +#define v1 R12 +#define v2 R13 +#define v3 R14 +#define v4 R15 +#define x1 R20 +#define x2 R21 +#define x3 R22 +#define x4 R23 + +#define round(acc, x) \ + MADD prime2, acc, x, acc \ + ROR $64-31, acc \ + MUL prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + MUL prime2, x \ + ROR $64-31, x \ + MUL prime1, x + +#define mergeRound(acc, x) \ + round0(x) \ + EOR x, acc \ + MADD acc, prime4, prime1, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that n >= 32. +#define blockLoop() \ + LSR $5, n, nblocks \ + PCALIGN $16 \ + loop: \ + LDP.P 16(p), (x1, x2) \ + LDP.P 16(p), (x3, x4) \ + round(v1, x1) \ + round(v2, x2) \ + round(v3, x3) \ + round(v4, x4) \ + SUB $1, nblocks \ + CBNZ nblocks, loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + LDP b_base+0(FP), (p, n) + + LDP ·primes+0(SB), (prime1, prime2) + LDP ·primes+16(SB), (prime3, prime4) + MOVD ·primes+32(SB), prime5 + + CMP $32, n + CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } + BLT afterLoop + + ADD prime1, prime2, v1 + MOVD prime2, v2 + MOVD $0, v3 + NEG prime1, v4 + + blockLoop() + + ROR $64-1, v1, x1 + ROR $64-7, v2, x2 + ADD x1, x2 + ROR $64-12, v3, x3 + ROR $64-18, v4, x4 + ADD x3, x4 + ADD x2, x4, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + +afterLoop: + ADD n, h + + TBZ $4, n, try8 + LDP.P 16(p), (x1, x2) + + round0(x1) + + // NOTE: here and below, sequencing the EOR after the ROR (using a + // rotated register) is worth a small but measurable speedup for small + // inputs. 
+ ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + + round0(x2) + ROR $64-27, h + EOR x2 @> 64-27, h, h + MADD h, prime4, prime1, h + +try8: + TBZ $3, n, try4 + MOVD.P 8(p), x1 + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + +try4: + TBZ $2, n, try2 + MOVWU.P 4(p), x2 + + MUL prime1, x2 + ROR $64-23, h + EOR x2 @> 64-23, h, h + MADD h, prime3, prime2, h + +try2: + TBZ $1, n, try1 + MOVHU.P 2(p), x3 + AND $255, x3, x1 + LSR $8, x3, x2 + + MUL prime5, x1 + ROR $64-11, h + EOR x1 @> 64-11, h, h + MUL prime1, h + + MUL prime5, x2 + ROR $64-11, h + EOR x2 @> 64-11, h, h + MUL prime1, h + +try1: + TBZ $0, n, finalize + MOVBU (p), x4 + + MUL prime5, x4 + ROR $64-11, h + EOR x4 @> 64-11, h, h + MUL prime1, h + +finalize: + EOR h >> 33, h + MUL prime2, h + EOR h >> 29, h + MUL prime3, h + EOR h >> 32, h + + MOVD h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + LDP ·primes+0(SB), (prime1, prime2) + + // Load state. Assume v[1-4] are stored contiguously. + MOVD d+0(FP), digest + LDP 0(digest), (v1, v2) + LDP 16(digest), (v3, v4) + + LDP b_base+8(FP), (p, n) + + blockLoop() + + // Store updated state. + STP (v1, v2), 0(digest) + STP (v3, v4), 16(digest) + + BIC $31, n + MOVD n, ret+32(FP) + RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go similarity index 73% rename from vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go rename to vendor/github.com/cespare/xxhash/v2/xxhash_asm.go index ad14b807..9216e0a4 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go @@ -1,3 +1,5 @@ +//go:build (amd64 || arm64) && !appengine && gc && !purego +// +build amd64 arm64 // +build !appengine // +build gc // +build !purego diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go index 4a5a8216..26df13bb 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -1,4 +1,5 @@ -// +build !amd64 appengine !gc purego +//go:build (!amd64 && !arm64) || appengine || !gc || purego +// +build !amd64,!arm64 appengine !gc purego package xxhash @@ -14,10 +15,10 @@ func Sum64(b []byte) uint64 { var h uint64 if n >= 32 { - v1 := prime1v + prime2 + v1 := primes[0] + prime2 v2 := prime2 v3 := uint64(0) - v4 := -prime1v + v4 := -primes[0] for len(b) >= 32 { v1 = round(v1, u64(b[0:8:len(b)])) v2 = round(v2, u64(b[8:16:len(b)])) @@ -36,19 +37,18 @@ func Sum64(b []byte) uint64 { h += uint64(n) - i, end := 0, len(b) - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(b[i:i+8:len(b)])) + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } - if i+4 <= end { - h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 - i += 4 + b = b[4:] } - for ; i < end; i++ { - h ^= uint64(b[i]) * prime5 + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go index fc9bea7a..e86f1b5f 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -1,3 +1,4 @@ +//go:build appengine // +build appengine // This file contains the safe implementations of otherwise 
unsafe-using code. diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go index 376e0ca2..1c1638fd 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -1,3 +1,4 @@ +//go:build !appengine // +build !appengine // This file encapsulates usage of unsafe. @@ -11,7 +12,7 @@ import ( // In the future it's possible that compiler optimizations will make these // XxxString functions unnecessary by realizing that calls such as -// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205. +// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205. // If that happens, even if we keep these functions they can be replaced with // the trivial safe code. diff --git a/vendor/github.com/chrismellard/docker-credential-acr-env/pkg/token/token.go b/vendor/github.com/chrismellard/docker-credential-acr-env/pkg/token/token.go index 67708e61..feff3e1a 100644 --- a/vendor/github.com/chrismellard/docker-credential-acr-env/pkg/token/token.go +++ b/vendor/github.com/chrismellard/docker-credential-acr-env/pkg/token/token.go @@ -63,8 +63,46 @@ func getServicePrincipalToken(settings auth.EnvironmentSettings, resource string return &adal.ServicePrincipalToken{}, fmt.Errorf("authentication method currently unsupported") } + // federated OIDC JWT assertion + jwt, err := jwtLookup() + if err == nil { + clientID, isPresent := os.LookupEnv("AZURE_CLIENT_ID") + if !isPresent { + return &adal.ServicePrincipalToken{}, fmt.Errorf("failed to get client id from environment") + } + tenantID, isPresent := os.LookupEnv("AZURE_TENANT_ID") + if !isPresent { + return &adal.ServicePrincipalToken{}, fmt.Errorf("failed to get client id from environment") + } + + oAuthConfig, err := adal.NewOAuthConfig(settings.Environment.ActiveDirectoryEndpoint, tenantID) + if err != nil { + return &adal.ServicePrincipalToken{}, fmt.Errorf("failed to initialise OAuthConfig - %w", err) + } + + return adal.NewServicePrincipalTokenFromFederatedToken(*oAuthConfig, clientID, *jwt, resource) + } + // 4. 
MSI return adal.NewServicePrincipalTokenFromManagedIdentity(resource, &adal.ManagedIdentityOptions{ ClientID: os.Getenv("AZURE_CLIENT_ID"), }) } + +func jwtLookup() (*string, error) { + jwt, isPresent := os.LookupEnv("AZURE_FEDERATED_TOKEN") + if isPresent { + return &jwt, nil + } + + if jwtFile, isPresent := os.LookupEnv("AZURE_FEDERATED_TOKEN_FILE"); isPresent { + jwtBytes, err := os.ReadFile(jwtFile) + if err != nil { + return nil, err + } + jwt = string(jwtBytes) + return &jwt, nil + } + + return nil, fmt.Errorf("no JWT found") +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/alias.go b/vendor/github.com/cloudevents/sdk-go/v2/alias.go index ed64b4c0..2fbfaa9a 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/alias.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/alias.go @@ -135,8 +135,14 @@ var ( ToMessage = binding.ToMessage // Event Creation - NewEventFromHTTPRequest = http.NewEventFromHTTPRequest - NewEventFromHTTPResponse = http.NewEventFromHTTPResponse + + NewEventFromHTTPRequest = http.NewEventFromHTTPRequest + NewEventFromHTTPResponse = http.NewEventFromHTTPResponse + NewEventsFromHTTPRequest = http.NewEventsFromHTTPRequest + NewEventsFromHTTPResponse = http.NewEventsFromHTTPResponse + NewHTTPRequestFromEvent = http.NewHTTPRequestFromEvent + NewHTTPRequestFromEvents = http.NewHTTPRequestFromEvents + IsHTTPBatch = http.IsHTTPBatch // HTTP Messages diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/encoding.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/encoding.go index 16611a3d..5070b729 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/binding/encoding.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/encoding.go @@ -19,6 +19,9 @@ const ( EncodingEvent // When the encoding is unknown (which means that the message is a non-event) EncodingUnknown + + // EncodingBatch is an instance of JSON Batched Events + EncodingBatch ) func (e Encoding) String() string { @@ -29,6 +32,8 @@ func (e Encoding) String() string { return "structured" case EncodingEvent: return "event" + case EncodingBatch: + return "batch" case EncodingUnknown: return "unknown" } diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/format/format.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/format/format.go index 2d840025..6bdd1842 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/binding/format/format.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/format/format.go @@ -7,6 +7,7 @@ package format import ( "encoding/json" + "errors" "fmt" "strings" @@ -41,12 +42,33 @@ func (jsonFmt) Unmarshal(b []byte, e *event.Event) error { return json.Unmarshal(b, e) } +// JSONBatch is the built-in "application/cloudevents-batch+json" format. +var JSONBatch = jsonBatchFmt{} + +type jsonBatchFmt struct{} + +func (jb jsonBatchFmt) MediaType() string { + return event.ApplicationCloudEventsBatchJSON +} + +// Marshal will return an error for jsonBatchFmt since the Format interface doesn't support batch Marshalling, and we +// know it's structured batch json, we'll go direct to the json.UnMarshall() (see `ToEvents()`) since that is the best +// way to support batch operations for now. 
+func (jb jsonBatchFmt) Marshal(e *event.Event) ([]byte, error) { + return nil, errors.New("not supported for batch events") +} + +func (jb jsonBatchFmt) Unmarshal(b []byte, e *event.Event) error { + return errors.New("not supported for batch events") +} + // built-in formats var formats map[string]Format func init() { formats = map[string]Format{} Add(JSON) + Add(JSONBatch) } // Lookup returns the format for contentType, or nil if not found. diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/to_event.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/to_event.go index 339a7833..d3332c15 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/binding/to_event.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/to_event.go @@ -8,6 +8,7 @@ package binding import ( "bytes" "context" + "encoding/json" "errors" "fmt" "io" @@ -21,6 +22,9 @@ import ( // ErrCannotConvertToEvent is a generic error when a conversion of a Message to an Event fails var ErrCannotConvertToEvent = errors.New("cannot convert message to event") +// ErrCannotConvertToEvents is a generic error when a conversion of a Message to a Batched Event fails +var ErrCannotConvertToEvents = errors.New("cannot convert message to batched events") + // ToEvent translates a Message with a valid Structured or Binary representation to an Event. // This function returns the Event generated from the Message and the original encoding of the message or // an error that points the conversion error. @@ -61,6 +65,21 @@ func ToEvent(ctx context.Context, message MessageReader, transformers ...Transfo return &e, Transformers(transformers).Transform((*EventMessage)(&e), encoder) } +// ToEvents translates a Batch Message and corresponding Reader data to a slice of Events. +// This function returns the Events generated from the body data, or an error that points +// to the conversion issue. +func ToEvents(ctx context.Context, message MessageReader, body io.Reader) ([]event.Event, error) { + messageEncoding := message.ReadEncoding() + if messageEncoding != EncodingBatch { + return nil, ErrCannotConvertToEvents + } + + // Since Format doesn't support batch Marshalling, and we know it's structured batch json, we'll go direct to the + // json.UnMarshall(), since that is the best way to support batch operations for now. 
+ var events []event.Event + return events, json.NewDecoder(body).Decode(&events) +} + type messageToEventBuilder event.Event var _ StructuredWriter = (*messageToEventBuilder)(nil) diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/extensions.go b/vendor/github.com/cloudevents/sdk-go/v2/event/extensions.go index 6c4193f3..72d0e757 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/extensions.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/extensions.go @@ -50,7 +50,7 @@ func validateExtensionName(key string) error { for _, c := range key { if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9')) { - return errors.New("bad key, CloudEvents attribute names MUST consist of lower-case letters ('a' to 'z') or digits ('0' to '9') from the ASCII character set") + return errors.New("bad key, CloudEvents attribute names MUST consist of lower-case letters ('a' to 'z'), upper-case letters ('A' to 'Z') or digits ('0' to '9') from the ASCII character set") } } return nil diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/message.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/message.go index e7e51d03..7a7c36f9 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/message.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/message.go @@ -92,6 +92,9 @@ func (m *Message) ReadEncoding() binding.Encoding { return binding.EncodingBinary } if m.format != nil { + if m.format == format.JSONBatch { + return binding.EncodingBatch + } return binding.EncodingStructured } return binding.EncodingUnknown diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/utility.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/utility.go index d46a3346..350fc1cf 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/utility.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/utility.go @@ -6,7 +6,9 @@ package http import ( + "bytes" "context" + "encoding/json" nethttp "net/http" "github.com/cloudevents/sdk-go/v2/binding" @@ -24,3 +26,64 @@ func NewEventFromHTTPResponse(resp *nethttp.Response) (*event.Event, error) { msg := NewMessageFromHttpResponse(resp) return binding.ToEvent(context.Background(), msg) } + +// NewEventsFromHTTPRequest returns a batched set of Events from a HTTP Request +func NewEventsFromHTTPRequest(req *nethttp.Request) ([]event.Event, error) { + msg := NewMessageFromHttpRequest(req) + return binding.ToEvents(context.Background(), msg, msg.BodyReader) +} + +// NewEventsFromHTTPResponse returns a batched set of Events from a HTTP Response +func NewEventsFromHTTPResponse(resp *nethttp.Response) ([]event.Event, error) { + msg := NewMessageFromHttpResponse(resp) + return binding.ToEvents(context.Background(), msg, msg.BodyReader) +} + +// NewHTTPRequestFromEvent creates a http.Request object that can be used with any http.Client for a singular event. +// This is an HTTP POST action to the provided url. +func NewHTTPRequestFromEvent(ctx context.Context, url string, event event.Event) (*nethttp.Request, error) { + if err := event.Validate(); err != nil { + return nil, err + } + + req, err := nethttp.NewRequestWithContext(ctx, nethttp.MethodPost, url, nil) + if err != nil { + return nil, err + } + if err := WriteRequest(ctx, (*binding.EventMessage)(&event), req); err != nil { + return nil, err + } + + return req, nil +} + +// NewHTTPRequestFromEvents creates a http.Request object that can be used with any http.Client for sending +// a batched set of events. 
This is an HTTP POST action to the provided url. +func NewHTTPRequestFromEvents(ctx context.Context, url string, events []event.Event) (*nethttp.Request, error) { + // Sending batch events is quite straightforward, as there is only JSON format, so a simple implementation. + for _, e := range events { + if err := e.Validate(); err != nil { + return nil, err + } + } + var buffer bytes.Buffer + err := json.NewEncoder(&buffer).Encode(events) + if err != nil { + return nil, err + } + + request, err := nethttp.NewRequestWithContext(ctx, nethttp.MethodPost, url, &buffer) + if err != nil { + return nil, err + } + + request.Header.Set(ContentType, event.ApplicationCloudEventsBatchJSON) + + return request, nil +} + +// IsHTTPBatch returns if the current http.Request or http.Response is a batch event operation, by checking the +// header `Content-Type` value. +func IsHTTPBatch(header nethttp.Header) bool { + return header.Get(ContentType) == event.ApplicationCloudEventsBatchJSON +} diff --git a/vendor/github.com/containerd/containerd/version/version.go b/vendor/github.com/containerd/containerd/version/version.go index ca1b6773..6bc3ed17 100644 --- a/vendor/github.com/containerd/containerd/version/version.go +++ b/vendor/github.com/containerd/containerd/version/version.go @@ -23,7 +23,7 @@ var ( Package = "github.com/containerd/containerd" // Version holds the complete version number. Filled in at linking time. - Version = "1.6.18+unknown" + Version = "1.6.19+unknown" // Revision is filled with the VCS (e.g. git) revision being used to build // the program at linking time. diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE b/vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go new file mode 100644 index 00000000..b071cea5 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go @@ -0,0 +1,690 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Copyright 2019 The Go Authors. All rights reserved. 
+ Use of this source code is governed by a BSD-style + license that can be found in the LICENSE file. +*/ + +package estargz + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "context" + "errors" + "fmt" + "io" + "os" + "path" + "runtime" + "strings" + "sync" + + "github.com/containerd/stargz-snapshotter/estargz/errorutil" + "github.com/klauspost/compress/zstd" + digest "github.com/opencontainers/go-digest" + "golang.org/x/sync/errgroup" +) + +type options struct { + chunkSize int + compressionLevel int + prioritizedFiles []string + missedPrioritizedFiles *[]string + compression Compression + ctx context.Context + minChunkSize int +} + +type Option func(o *options) error + +// WithChunkSize option specifies the chunk size of eStargz blob to build. +func WithChunkSize(chunkSize int) Option { + return func(o *options) error { + o.chunkSize = chunkSize + return nil + } +} + +// WithCompressionLevel option specifies the gzip compression level. +// The default is gzip.BestCompression. +// This option will be ignored if WithCompression option is used. +// See also: https://godoc.org/compress/gzip#pkg-constants +func WithCompressionLevel(level int) Option { + return func(o *options) error { + o.compressionLevel = level + return nil + } +} + +// WithPrioritizedFiles option specifies the list of prioritized files. +// These files must be complete paths that are absolute or relative to "/" +// For example, all of "foo/bar", "/foo/bar", "./foo/bar" and "../foo/bar" +// are treated as "/foo/bar". +func WithPrioritizedFiles(files []string) Option { + return func(o *options) error { + o.prioritizedFiles = files + return nil + } +} + +// WithAllowPrioritizeNotFound makes Build continue the execution even if some +// of prioritized files specified by WithPrioritizedFiles option aren't found +// in the input tar. Instead, this records all missed file names to the passed +// slice. +func WithAllowPrioritizeNotFound(missedFiles *[]string) Option { + return func(o *options) error { + if missedFiles == nil { + return fmt.Errorf("WithAllowPrioritizeNotFound: slice must be passed") + } + o.missedPrioritizedFiles = missedFiles + return nil + } +} + +// WithCompression specifies compression algorithm to be used. +// Default is gzip. +func WithCompression(compression Compression) Option { + return func(o *options) error { + o.compression = compression + return nil + } +} + +// WithContext specifies a context that can be used for clean canceleration. +func WithContext(ctx context.Context) Option { + return func(o *options) error { + o.ctx = ctx + return nil + } +} + +// WithMinChunkSize option specifies the minimal number of bytes of data +// must be written in one gzip stream. +// By increasing this number, one gzip stream can contain multiple files +// and it hopefully leads to smaller result blob. +// NOTE: This adds a TOC property that old reader doesn't understand. +func WithMinChunkSize(minChunkSize int) Option { + return func(o *options) error { + o.minChunkSize = minChunkSize + return nil + } +} + +// Blob is an eStargz blob. +type Blob struct { + io.ReadCloser + diffID digest.Digester + tocDigest digest.Digest +} + +// DiffID returns the digest of uncompressed blob. +// It is only valid to call DiffID after Close. +func (b *Blob) DiffID() digest.Digest { + return b.diffID.Digest() +} + +// TOCDigest returns the digest of uncompressed TOC JSON. 
+func (b *Blob) TOCDigest() digest.Digest { + return b.tocDigest +} + +// Build builds an eStargz blob which is an extended version of stargz, from a blob (gzip, zstd +// or plain tar) passed through the argument. If there are some prioritized files are listed in +// the option, these files are grouped as "prioritized" and can be used for runtime optimization +// (e.g. prefetch). This function builds a blob in parallel, with dividing that blob into several +// (at least the number of runtime.GOMAXPROCS(0)) sub-blobs. +func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) { + var opts options + opts.compressionLevel = gzip.BestCompression // BestCompression by default + for _, o := range opt { + if err := o(&opts); err != nil { + return nil, err + } + } + if opts.compression == nil { + opts.compression = newGzipCompressionWithLevel(opts.compressionLevel) + } + layerFiles := newTempFiles() + ctx := opts.ctx + if ctx == nil { + ctx = context.Background() + } + done := make(chan struct{}) + defer close(done) + go func() { + select { + case <-done: + // nop + case <-ctx.Done(): + layerFiles.CleanupAll() + } + }() + defer func() { + if rErr != nil { + if err := layerFiles.CleanupAll(); err != nil { + rErr = fmt.Errorf("failed to cleanup tmp files: %v: %w", err, rErr) + } + } + if cErr := ctx.Err(); cErr != nil { + rErr = fmt.Errorf("error from context %q: %w", cErr, rErr) + } + }() + tarBlob, err := decompressBlob(tarBlob, layerFiles) + if err != nil { + return nil, err + } + entries, err := sortEntries(tarBlob, opts.prioritizedFiles, opts.missedPrioritizedFiles) + if err != nil { + return nil, err + } + var tarParts [][]*entry + if opts.minChunkSize > 0 { + // Each entry needs to know the size of the current gzip stream so they + // cannot be processed in parallel. + tarParts = [][]*entry{entries} + } else { + tarParts = divideEntries(entries, runtime.GOMAXPROCS(0)) + } + writers := make([]*Writer, len(tarParts)) + payloads := make([]*os.File, len(tarParts)) + var mu sync.Mutex + var eg errgroup.Group + for i, parts := range tarParts { + i, parts := i, parts + // builds verifiable stargz sub-blobs + eg.Go(func() error { + esgzFile, err := layerFiles.TempFile("", "esgzdata") + if err != nil { + return err + } + sw := NewWriterWithCompressor(esgzFile, opts.compression) + sw.ChunkSize = opts.chunkSize + sw.MinChunkSize = opts.minChunkSize + if sw.needsOpenGzEntries == nil { + sw.needsOpenGzEntries = make(map[string]struct{}) + } + for _, f := range []string{PrefetchLandmark, NoPrefetchLandmark} { + sw.needsOpenGzEntries[f] = struct{}{} + } + if err := sw.AppendTar(readerFromEntries(parts...)); err != nil { + return err + } + mu.Lock() + writers[i] = sw + payloads[i] = esgzFile + mu.Unlock() + return nil + }) + } + if err := eg.Wait(); err != nil { + rErr = err + return nil, err + } + tocAndFooter, tocDgst, err := closeWithCombine(writers...) 
+ if err != nil { + rErr = err + return nil, err + } + var rs []io.Reader + for _, p := range payloads { + fs, err := fileSectionReader(p) + if err != nil { + return nil, err + } + rs = append(rs, fs) + } + diffID := digest.Canonical.Digester() + pr, pw := io.Pipe() + go func() { + r, err := opts.compression.Reader(io.TeeReader(io.MultiReader(append(rs, tocAndFooter)...), pw)) + if err != nil { + pw.CloseWithError(err) + return + } + defer r.Close() + if _, err := io.Copy(diffID.Hash(), r); err != nil { + pw.CloseWithError(err) + return + } + pw.Close() + }() + return &Blob{ + ReadCloser: readCloser{ + Reader: pr, + closeFunc: layerFiles.CleanupAll, + }, + tocDigest: tocDgst, + diffID: diffID, + }, nil +} + +// closeWithCombine takes unclosed Writers and close them. This also returns the +// toc that combined all Writers into. +// Writers doesn't write TOC and footer to the underlying writers so they can be +// combined into a single eStargz and tocAndFooter returned by this function can +// be appended at the tail of that combined blob. +func closeWithCombine(ws ...*Writer) (tocAndFooterR io.Reader, tocDgst digest.Digest, err error) { + if len(ws) == 0 { + return nil, "", fmt.Errorf("at least one writer must be passed") + } + for _, w := range ws { + if w.closed { + return nil, "", fmt.Errorf("writer must be unclosed") + } + defer func(w *Writer) { w.closed = true }(w) + if err := w.closeGz(); err != nil { + return nil, "", err + } + if err := w.bw.Flush(); err != nil { + return nil, "", err + } + } + var ( + mtoc = new(JTOC) + currentOffset int64 + ) + mtoc.Version = ws[0].toc.Version + for _, w := range ws { + for _, e := range w.toc.Entries { + // Recalculate Offset of non-empty files/chunks + if (e.Type == "reg" && e.Size > 0) || e.Type == "chunk" { + e.Offset += currentOffset + } + mtoc.Entries = append(mtoc.Entries, e) + } + if w.toc.Version > mtoc.Version { + mtoc.Version = w.toc.Version + } + currentOffset += w.cw.n + } + + return tocAndFooter(ws[0].compressor, mtoc, currentOffset) +} + +func tocAndFooter(compressor Compressor, toc *JTOC, offset int64) (io.Reader, digest.Digest, error) { + buf := new(bytes.Buffer) + tocDigest, err := compressor.WriteTOCAndFooter(buf, offset, toc, nil) + if err != nil { + return nil, "", err + } + return buf, tocDigest, nil +} + +// divideEntries divides passed entries to the parts at least the number specified by the +// argument. +func divideEntries(entries []*entry, minPartsNum int) (set [][]*entry) { + var estimatedSize int64 + for _, e := range entries { + estimatedSize += e.header.Size + } + unitSize := estimatedSize / int64(minPartsNum) + var ( + nextEnd = unitSize + offset int64 + ) + set = append(set, []*entry{}) + for _, e := range entries { + set[len(set)-1] = append(set[len(set)-1], e) + offset += e.header.Size + if offset > nextEnd { + set = append(set, []*entry{}) + nextEnd += unitSize + } + } + return +} + +var errNotFound = errors.New("not found") + +// sortEntries reads the specified tar blob and returns a list of tar entries. +// If some of prioritized files are specified, the list starts from these +// files with keeping the order specified by the argument. +func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]string) ([]*entry, error) { + + // Import tar file. + intar, err := importTar(in) + if err != nil { + return nil, fmt.Errorf("failed to sort: %w", err) + } + + // Sort the tar file respecting to the prioritized files list. 
+ sorted := &tarFile{} + for _, l := range prioritized { + if err := moveRec(l, intar, sorted); err != nil { + if errors.Is(err, errNotFound) && missedPrioritized != nil { + *missedPrioritized = append(*missedPrioritized, l) + continue // allow not found + } + return nil, fmt.Errorf("failed to sort tar entries: %w", err) + } + } + if len(prioritized) == 0 { + sorted.add(&entry{ + header: &tar.Header{ + Name: NoPrefetchLandmark, + Typeflag: tar.TypeReg, + Size: int64(len([]byte{landmarkContents})), + }, + payload: bytes.NewReader([]byte{landmarkContents}), + }) + } else { + sorted.add(&entry{ + header: &tar.Header{ + Name: PrefetchLandmark, + Typeflag: tar.TypeReg, + Size: int64(len([]byte{landmarkContents})), + }, + payload: bytes.NewReader([]byte{landmarkContents}), + }) + } + + // Dump all entry and concatinate them. + return append(sorted.dump(), intar.dump()...), nil +} + +// readerFromEntries returns a reader of tar archive that contains entries passed +// through the arguments. +func readerFromEntries(entries ...*entry) io.Reader { + pr, pw := io.Pipe() + go func() { + tw := tar.NewWriter(pw) + defer tw.Close() + for _, entry := range entries { + if err := tw.WriteHeader(entry.header); err != nil { + pw.CloseWithError(fmt.Errorf("Failed to write tar header: %v", err)) + return + } + if _, err := io.Copy(tw, entry.payload); err != nil { + pw.CloseWithError(fmt.Errorf("Failed to write tar payload: %v", err)) + return + } + } + pw.Close() + }() + return pr +} + +func importTar(in io.ReaderAt) (*tarFile, error) { + tf := &tarFile{} + pw, err := newCountReadSeeker(in) + if err != nil { + return nil, fmt.Errorf("failed to make position watcher: %w", err) + } + tr := tar.NewReader(pw) + + // Walk through all nodes. + for { + // Fetch and parse next header. + h, err := tr.Next() + if err != nil { + if err == io.EOF { + break + } else { + return nil, fmt.Errorf("failed to parse tar file, %w", err) + } + } + switch cleanEntryName(h.Name) { + case PrefetchLandmark, NoPrefetchLandmark: + // Ignore existing landmark + continue + } + + // Add entry. If it already exists, replace it. + if _, ok := tf.get(h.Name); ok { + tf.remove(h.Name) + } + tf.add(&entry{ + header: h, + payload: io.NewSectionReader(in, pw.currentPos(), h.Size), + }) + } + + return tf, nil +} + +func moveRec(name string, in *tarFile, out *tarFile) error { + name = cleanEntryName(name) + if name == "" { // root directory. stop recursion. + if e, ok := in.get(name); ok { + // entry of the root directory exists. we should move it as well. + // this case will occur if tar entries are prefixed with "./", "/", etc. 
+ out.add(e) + in.remove(name) + } + return nil + } + + _, okIn := in.get(name) + _, okOut := out.get(name) + if !okIn && !okOut { + return fmt.Errorf("file: %q: %w", name, errNotFound) + } + + parent, _ := path.Split(strings.TrimSuffix(name, "/")) + if err := moveRec(parent, in, out); err != nil { + return err + } + if e, ok := in.get(name); ok && e.header.Typeflag == tar.TypeLink { + if err := moveRec(e.header.Linkname, in, out); err != nil { + return err + } + } + if e, ok := in.get(name); ok { + out.add(e) + in.remove(name) + } + return nil +} + +type entry struct { + header *tar.Header + payload io.ReadSeeker +} + +type tarFile struct { + index map[string]*entry + stream []*entry +} + +func (f *tarFile) add(e *entry) { + if f.index == nil { + f.index = make(map[string]*entry) + } + f.index[cleanEntryName(e.header.Name)] = e + f.stream = append(f.stream, e) +} + +func (f *tarFile) remove(name string) { + name = cleanEntryName(name) + if f.index != nil { + delete(f.index, name) + } + var filtered []*entry + for _, e := range f.stream { + if cleanEntryName(e.header.Name) == name { + continue + } + filtered = append(filtered, e) + } + f.stream = filtered +} + +func (f *tarFile) get(name string) (e *entry, ok bool) { + if f.index == nil { + return nil, false + } + e, ok = f.index[cleanEntryName(name)] + return +} + +func (f *tarFile) dump() []*entry { + return f.stream +} + +type readCloser struct { + io.Reader + closeFunc func() error +} + +func (rc readCloser) Close() error { + return rc.closeFunc() +} + +func fileSectionReader(file *os.File) (*io.SectionReader, error) { + info, err := file.Stat() + if err != nil { + return nil, err + } + return io.NewSectionReader(file, 0, info.Size()), nil +} + +func newTempFiles() *tempFiles { + return &tempFiles{} +} + +type tempFiles struct { + files []*os.File + filesMu sync.Mutex + cleanupOnce sync.Once +} + +func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) { + f, err := os.CreateTemp(dir, pattern) + if err != nil { + return nil, err + } + tf.filesMu.Lock() + tf.files = append(tf.files, f) + tf.filesMu.Unlock() + return f, nil +} + +func (tf *tempFiles) CleanupAll() (err error) { + tf.cleanupOnce.Do(func() { + err = tf.cleanupAll() + }) + return +} + +func (tf *tempFiles) cleanupAll() error { + tf.filesMu.Lock() + defer tf.filesMu.Unlock() + var allErr []error + for _, f := range tf.files { + if err := f.Close(); err != nil { + allErr = append(allErr, err) + } + if err := os.Remove(f.Name()); err != nil { + allErr = append(allErr, err) + } + } + tf.files = nil + return errorutil.Aggregate(allErr) +} + +func newCountReadSeeker(r io.ReaderAt) (*countReadSeeker, error) { + pos := int64(0) + return &countReadSeeker{r: r, cPos: &pos}, nil +} + +type countReadSeeker struct { + r io.ReaderAt + cPos *int64 + + mu sync.Mutex +} + +func (cr *countReadSeeker) Read(p []byte) (int, error) { + cr.mu.Lock() + defer cr.mu.Unlock() + + n, err := cr.r.ReadAt(p, *cr.cPos) + if err == nil { + *cr.cPos += int64(n) + } + return n, err +} + +func (cr *countReadSeeker) Seek(offset int64, whence int) (int64, error) { + cr.mu.Lock() + defer cr.mu.Unlock() + + switch whence { + default: + return 0, fmt.Errorf("Unknown whence: %v", whence) + case io.SeekStart: + case io.SeekCurrent: + offset += *cr.cPos + case io.SeekEnd: + return 0, fmt.Errorf("Unsupported whence: %v", whence) + } + + if offset < 0 { + return 0, fmt.Errorf("invalid offset") + } + *cr.cPos = offset + return offset, nil +} + +func (cr *countReadSeeker) currentPos() int64 { + cr.mu.Lock() + 
defer cr.mu.Unlock() + + return *cr.cPos +} + +func decompressBlob(org *io.SectionReader, tmp *tempFiles) (*io.SectionReader, error) { + if org.Size() < 4 { + return org, nil + } + src := make([]byte, 4) + if _, err := org.Read(src); err != nil && err != io.EOF { + return nil, err + } + var dR io.Reader + if bytes.Equal([]byte{0x1F, 0x8B, 0x08}, src[:3]) { + // gzip + dgR, err := gzip.NewReader(io.NewSectionReader(org, 0, org.Size())) + if err != nil { + return nil, err + } + defer dgR.Close() + dR = io.Reader(dgR) + } else if bytes.Equal([]byte{0x28, 0xb5, 0x2f, 0xfd}, src[:4]) { + // zstd + dzR, err := zstd.NewReader(io.NewSectionReader(org, 0, org.Size())) + if err != nil { + return nil, err + } + defer dzR.Close() + dR = io.Reader(dzR) + } else { + // uncompressed + return io.NewSectionReader(org, 0, org.Size()), nil + } + b, err := tmp.TempFile("", "uncompresseddata") + if err != nil { + return nil, err + } + if _, err := io.Copy(b, dR); err != nil { + return nil, err + } + return fileSectionReader(b) +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go new file mode 100644 index 00000000..6de78b02 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go @@ -0,0 +1,40 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package errorutil + +import ( + "errors" + "fmt" + "strings" +) + +// Aggregate combines a list of errors into a single new error. +func Aggregate(errs []error) error { + switch len(errs) { + case 0: + return nil + case 1: + return errs[0] + default: + points := make([]string, len(errs)+1) + points[0] = fmt.Sprintf("%d error(s) occurred:", len(errs)) + for i, err := range errs { + points[i+1] = fmt.Sprintf("* %s", err) + } + return errors.New(strings.Join(points, "\n\t")) + } +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go new file mode 100644 index 00000000..f4d55465 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go @@ -0,0 +1,1223 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Copyright 2019 The Go Authors. All rights reserved. + Use of this source code is governed by a BSD-style + license that can be found in the LICENSE file. 
+*/ + +package estargz + +import ( + "bufio" + "bytes" + "compress/gzip" + "crypto/sha256" + "errors" + "fmt" + "hash" + "io" + "os" + "path" + "sort" + "strings" + "sync" + "time" + + "github.com/containerd/stargz-snapshotter/estargz/errorutil" + digest "github.com/opencontainers/go-digest" + "github.com/vbatts/tar-split/archive/tar" +) + +// A Reader permits random access reads from a stargz file. +type Reader struct { + sr *io.SectionReader + toc *JTOC + tocDigest digest.Digest + + // m stores all non-chunk entries, keyed by name. + m map[string]*TOCEntry + + // chunks stores all TOCEntry values for regular files that + // are split up. For a file with a single chunk, it's only + // stored in m. + chunks map[string][]*TOCEntry + + decompressor Decompressor +} + +type openOpts struct { + tocOffset int64 + decompressors []Decompressor + telemetry *Telemetry +} + +// OpenOption is an option used during opening the layer +type OpenOption func(o *openOpts) error + +// WithTOCOffset option specifies the offset of TOC +func WithTOCOffset(tocOffset int64) OpenOption { + return func(o *openOpts) error { + o.tocOffset = tocOffset + return nil + } +} + +// WithDecompressors option specifies decompressors to use. +// Default is gzip-based decompressor. +func WithDecompressors(decompressors ...Decompressor) OpenOption { + return func(o *openOpts) error { + o.decompressors = decompressors + return nil + } +} + +// WithTelemetry option specifies the telemetry hooks +func WithTelemetry(telemetry *Telemetry) OpenOption { + return func(o *openOpts) error { + o.telemetry = telemetry + return nil + } +} + +// MeasureLatencyHook is a func which takes start time and records the diff +type MeasureLatencyHook func(time.Time) + +// Telemetry is a struct which defines telemetry hooks. By implementing these hooks you should be able to record +// the latency metrics of the respective steps of estargz open operation. To be used with estargz.OpenWithTelemetry(...) +type Telemetry struct { + GetFooterLatency MeasureLatencyHook // measure time to get stargz footer (in milliseconds) + GetTocLatency MeasureLatencyHook // measure time to GET TOC JSON (in milliseconds) + DeserializeTocLatency MeasureLatencyHook // measure time to deserialize TOC JSON (in milliseconds) +} + +// Open opens a stargz file for reading. +// The behavior is configurable using options. +// +// Note that each entry name is normalized as the path that is relative to root. +func Open(sr *io.SectionReader, opt ...OpenOption) (*Reader, error) { + var opts openOpts + for _, o := range opt { + if err := o(&opts); err != nil { + return nil, err + } + } + + gzipCompressors := []Decompressor{new(GzipDecompressor), new(LegacyGzipDecompressor)} + decompressors := append(gzipCompressors, opts.decompressors...) + + // Determine the size to fetch. Try to fetch as many bytes as possible. + fetchSize := maxFooterSize(sr.Size(), decompressors...) 
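// A rough sketch of Open and its options documented above, assuming the
// caller imports this package as
// "github.com/containerd/stargz-snapshotter/estargz" plus "io", "log", "os"
// and "time"; the blob path is a hypothetical placeholder and only the
// default gzip-based decompressors are used (WithDecompressors could add
// more).
func openLayer(path string) (*estargz.Reader, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	// The file must stay open for as long as the returned Reader is used,
	// because the Reader lazily reads chunks from the SectionReader.
	fi, err := f.Stat()
	if err != nil {
		f.Close()
		return nil, err
	}
	return estargz.Open(
		io.NewSectionReader(f, 0, fi.Size()),
		estargz.WithTelemetry(&estargz.Telemetry{
			GetFooterLatency: func(start time.Time) {
				log.Printf("footer fetched in %v", time.Since(start))
			},
			DeserializeTocLatency: func(start time.Time) {
				log.Printf("TOC deserialized in %v", time.Since(start))
			},
		}),
	)
}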
+ if maybeTocOffset := opts.tocOffset; maybeTocOffset > fetchSize { + if maybeTocOffset > sr.Size() { + return nil, fmt.Errorf("blob size %d is smaller than the toc offset", sr.Size()) + } + fetchSize = sr.Size() - maybeTocOffset + } + + start := time.Now() // before getting layer footer + footer := make([]byte, fetchSize) + if _, err := sr.ReadAt(footer, sr.Size()-fetchSize); err != nil { + return nil, fmt.Errorf("error reading footer: %v", err) + } + if opts.telemetry != nil && opts.telemetry.GetFooterLatency != nil { + opts.telemetry.GetFooterLatency(start) + } + + var allErr []error + var found bool + var r *Reader + for _, d := range decompressors { + fSize := d.FooterSize() + fOffset := positive(int64(len(footer)) - fSize) + maybeTocBytes := footer[:fOffset] + _, tocOffset, tocSize, err := d.ParseFooter(footer[fOffset:]) + if err != nil { + allErr = append(allErr, err) + continue + } + if tocOffset >= 0 && tocSize <= 0 { + tocSize = sr.Size() - tocOffset - fSize + } + if tocOffset >= 0 && tocSize < int64(len(maybeTocBytes)) { + maybeTocBytes = maybeTocBytes[:tocSize] + } + r, err = parseTOC(d, sr, tocOffset, tocSize, maybeTocBytes, opts) + if err == nil { + found = true + break + } + allErr = append(allErr, err) + } + if !found { + return nil, errorutil.Aggregate(allErr) + } + if err := r.initFields(); err != nil { + return nil, fmt.Errorf("failed to initialize fields of entries: %v", err) + } + return r, nil +} + +// OpenFooter extracts and parses footer from the given blob. +// only supports gzip-based eStargz. +func OpenFooter(sr *io.SectionReader) (tocOffset int64, footerSize int64, rErr error) { + if sr.Size() < FooterSize && sr.Size() < legacyFooterSize { + return 0, 0, fmt.Errorf("blob size %d is smaller than the footer size", sr.Size()) + } + var footer [FooterSize]byte + if _, err := sr.ReadAt(footer[:], sr.Size()-FooterSize); err != nil { + return 0, 0, fmt.Errorf("error reading footer: %v", err) + } + var allErr []error + for _, d := range []Decompressor{new(GzipDecompressor), new(LegacyGzipDecompressor)} { + fSize := d.FooterSize() + fOffset := positive(int64(len(footer)) - fSize) + _, tocOffset, _, err := d.ParseFooter(footer[fOffset:]) + if err == nil { + return tocOffset, fSize, err + } + allErr = append(allErr, err) + } + return 0, 0, errorutil.Aggregate(allErr) +} + +// initFields populates the Reader from r.toc after decoding it from +// JSON. +// +// Unexported fields are populated and TOCEntry fields that were +// implicit in the JSON are populated. 
+func (r *Reader) initFields() error { + r.m = make(map[string]*TOCEntry, len(r.toc.Entries)) + r.chunks = make(map[string][]*TOCEntry) + var lastPath string + uname := map[int]string{} + gname := map[int]string{} + var lastRegEnt *TOCEntry + var chunkTopIndex int + for i, ent := range r.toc.Entries { + ent.Name = cleanEntryName(ent.Name) + switch ent.Type { + case "reg", "chunk": + if ent.Offset != r.toc.Entries[chunkTopIndex].Offset { + chunkTopIndex = i + } + ent.chunkTopIndex = chunkTopIndex + } + if ent.Type == "reg" { + lastRegEnt = ent + } + if ent.Type == "chunk" { + ent.Name = lastPath + r.chunks[ent.Name] = append(r.chunks[ent.Name], ent) + if ent.ChunkSize == 0 && lastRegEnt != nil { + ent.ChunkSize = lastRegEnt.Size - ent.ChunkOffset + } + } else { + lastPath = ent.Name + + if ent.Uname != "" { + uname[ent.UID] = ent.Uname + } else { + ent.Uname = uname[ent.UID] + } + if ent.Gname != "" { + gname[ent.GID] = ent.Gname + } else { + ent.Gname = uname[ent.GID] + } + + ent.modTime, _ = time.Parse(time.RFC3339, ent.ModTime3339) + + if ent.Type == "dir" { + ent.NumLink++ // Parent dir links to this directory + } + r.m[ent.Name] = ent + } + if ent.Type == "reg" && ent.ChunkSize > 0 && ent.ChunkSize < ent.Size { + r.chunks[ent.Name] = make([]*TOCEntry, 0, ent.Size/ent.ChunkSize+1) + r.chunks[ent.Name] = append(r.chunks[ent.Name], ent) + } + if ent.ChunkSize == 0 && ent.Size != 0 { + ent.ChunkSize = ent.Size + } + } + + // Populate children, add implicit directories: + for _, ent := range r.toc.Entries { + if ent.Type == "chunk" { + continue + } + // add "foo/": + // add "foo" child to "" (creating "" if necessary) + // + // add "foo/bar/": + // add "bar" child to "foo" (creating "foo" if necessary) + // + // add "foo/bar.txt": + // add "bar.txt" child to "foo" (creating "foo" if necessary) + // + // add "a/b/c/d/e/f.txt": + // create "a/b/c/d/e" node + // add "f.txt" child to "e" + + name := ent.Name + pdirName := parentDir(name) + if name == pdirName { + // This entry and its parent are the same. + // Ignore this for avoiding infinite loop of the reference. + // The example case where this can occur is when tar contains the root + // directory itself (e.g. "./", "/"). + continue + } + pdir := r.getOrCreateDir(pdirName) + ent.NumLink++ // at least one name(ent.Name) references this entry. + if ent.Type == "hardlink" { + org, err := r.getSource(ent) + if err != nil { + return err + } + org.NumLink++ // original entry is referenced by this ent.Name. + ent = org + } + pdir.addChild(path.Base(name), ent) + } + + lastOffset := r.sr.Size() + for i := len(r.toc.Entries) - 1; i >= 0; i-- { + e := r.toc.Entries[i] + if e.isDataType() { + e.nextOffset = lastOffset + } + if e.Offset != 0 && e.InnerOffset == 0 { + lastOffset = e.Offset + } + } + + return nil +} + +func (r *Reader) getSource(ent *TOCEntry) (_ *TOCEntry, err error) { + if ent.Type == "hardlink" { + org, ok := r.m[cleanEntryName(ent.LinkName)] + if !ok { + return nil, fmt.Errorf("%q is a hardlink but the linkname %q isn't found", ent.Name, ent.LinkName) + } + ent, err = r.getSource(org) + if err != nil { + return nil, err + } + } + return ent, nil +} + +func parentDir(p string) string { + dir, _ := path.Split(p) + return strings.TrimSuffix(dir, "/") +} + +func (r *Reader) getOrCreateDir(d string) *TOCEntry { + e, ok := r.m[d] + if !ok { + e = &TOCEntry{ + Name: d, + Type: "dir", + Mode: 0755, + NumLink: 2, // The directory itself(.) and the parent link to this directory. 
+ } + r.m[d] = e + if d != "" { + pdir := r.getOrCreateDir(parentDir(d)) + pdir.addChild(path.Base(d), e) + } + } + return e +} + +func (r *Reader) TOCDigest() digest.Digest { + return r.tocDigest +} + +// VerifyTOC checks that the TOC JSON in the passed blob matches the +// passed digests and that the TOC JSON contains digests for all chunks +// contained in the blob. If the verification succceeds, this function +// returns TOCEntryVerifier which holds all chunk digests in the stargz blob. +func (r *Reader) VerifyTOC(tocDigest digest.Digest) (TOCEntryVerifier, error) { + // Verify the digest of TOC JSON + if r.tocDigest != tocDigest { + return nil, fmt.Errorf("invalid TOC JSON %q; want %q", r.tocDigest, tocDigest) + } + return r.Verifiers() +} + +// Verifiers returns TOCEntryVerifier of this chunk. Use VerifyTOC instead in most cases +// because this doesn't verify TOC. +func (r *Reader) Verifiers() (TOCEntryVerifier, error) { + chunkDigestMap := make(map[int64]digest.Digest) // map from chunk offset to the chunk digest + regDigestMap := make(map[int64]digest.Digest) // map from chunk offset to the reg file digest + var chunkDigestMapIncomplete bool + var regDigestMapIncomplete bool + var containsChunk bool + for _, e := range r.toc.Entries { + if e.Type != "reg" && e.Type != "chunk" { + continue + } + + // offset must be unique in stargz blob + _, dOK := chunkDigestMap[e.Offset] + _, rOK := regDigestMap[e.Offset] + if dOK || rOK { + return nil, fmt.Errorf("offset %d found twice", e.Offset) + } + + if e.Type == "reg" { + if e.Size == 0 { + continue // ignores empty file + } + + // record the digest of regular file payload + if e.Digest != "" { + d, err := digest.Parse(e.Digest) + if err != nil { + return nil, fmt.Errorf("failed to parse regular file digest %q: %w", e.Digest, err) + } + regDigestMap[e.Offset] = d + } else { + regDigestMapIncomplete = true + } + } else { + containsChunk = true // this layer contains "chunk" entries. + } + + // "reg" also can contain ChunkDigest (e.g. when "reg" is the first entry of + // chunked file) + if e.ChunkDigest != "" { + d, err := digest.Parse(e.ChunkDigest) + if err != nil { + return nil, fmt.Errorf("failed to parse chunk digest %q: %w", e.ChunkDigest, err) + } + chunkDigestMap[e.Offset] = d + } else { + chunkDigestMapIncomplete = true + } + } + + if chunkDigestMapIncomplete { + // Though some chunk digests are not found, if this layer doesn't contain + // "chunk"s and all digest of "reg" files are recorded, we can use them instead. + if !containsChunk && !regDigestMapIncomplete { + return &verifier{digestMap: regDigestMap}, nil + } + return nil, fmt.Errorf("some ChunkDigest not found in TOC JSON") + } + + return &verifier{digestMap: chunkDigestMap}, nil +} + +// verifier is an implementation of TOCEntryVerifier which holds verifiers keyed by +// offset of the chunk. +type verifier struct { + digestMap map[int64]digest.Digest + digestMapMu sync.Mutex +} + +// Verifier returns a content verifier specified by TOCEntry. +func (v *verifier) Verifier(ce *TOCEntry) (digest.Verifier, error) { + v.digestMapMu.Lock() + defer v.digestMapMu.Unlock() + d, ok := v.digestMap[ce.Offset] + if !ok { + return nil, fmt.Errorf("verifier for offset=%d,size=%d hasn't been registered", + ce.Offset, ce.ChunkSize) + } + return d.Verifier(), nil +} + +// ChunkEntryForOffset returns the TOCEntry containing the byte of the +// named file at the given offset within the file. +// Name must be absolute path or one that is relative to root. 
+func (r *Reader) ChunkEntryForOffset(name string, offset int64) (e *TOCEntry, ok bool) { + name = cleanEntryName(name) + e, ok = r.Lookup(name) + if !ok || !e.isDataType() { + return nil, false + } + ents := r.chunks[name] + if len(ents) < 2 { + if offset >= e.ChunkSize { + return nil, false + } + return e, true + } + i := sort.Search(len(ents), func(i int) bool { + e := ents[i] + return e.ChunkOffset >= offset || (offset > e.ChunkOffset && offset < e.ChunkOffset+e.ChunkSize) + }) + if i == len(ents) { + return nil, false + } + return ents[i], true +} + +// Lookup returns the Table of Contents entry for the given path. +// +// To get the root directory, use the empty string. +// Path must be absolute path or one that is relative to root. +func (r *Reader) Lookup(path string) (e *TOCEntry, ok bool) { + path = cleanEntryName(path) + if r == nil { + return + } + e, ok = r.m[path] + if ok && e.Type == "hardlink" { + var err error + e, err = r.getSource(e) + if err != nil { + return nil, false + } + } + return +} + +// OpenFile returns the reader of the specified file payload. +// +// Name must be absolute path or one that is relative to root. +func (r *Reader) OpenFile(name string) (*io.SectionReader, error) { + fr, err := r.newFileReader(name) + if err != nil { + return nil, err + } + return io.NewSectionReader(fr, 0, fr.size), nil +} + +func (r *Reader) newFileReader(name string) (*fileReader, error) { + name = cleanEntryName(name) + ent, ok := r.Lookup(name) + if !ok { + // TODO: come up with some error plan. This is lazy: + return nil, &os.PathError{ + Path: name, + Op: "OpenFile", + Err: os.ErrNotExist, + } + } + if ent.Type != "reg" { + return nil, &os.PathError{ + Path: name, + Op: "OpenFile", + Err: errors.New("not a regular file"), + } + } + return &fileReader{ + r: r, + size: ent.Size, + ents: r.getChunks(ent), + }, nil +} + +func (r *Reader) OpenFileWithPreReader(name string, preRead func(*TOCEntry, io.Reader) error) (*io.SectionReader, error) { + fr, err := r.newFileReader(name) + if err != nil { + return nil, err + } + fr.preRead = preRead + return io.NewSectionReader(fr, 0, fr.size), nil +} + +func (r *Reader) getChunks(ent *TOCEntry) []*TOCEntry { + if ents, ok := r.chunks[ent.Name]; ok { + return ents + } + return []*TOCEntry{ent} +} + +type fileReader struct { + r *Reader + size int64 + ents []*TOCEntry // 1 or more reg/chunk entries + preRead func(*TOCEntry, io.Reader) error +} + +func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) { + if off >= fr.size { + return 0, io.EOF + } + if off < 0 { + return 0, errors.New("invalid offset") + } + var i int + if len(fr.ents) > 1 { + i = sort.Search(len(fr.ents), func(i int) bool { + return fr.ents[i].ChunkOffset >= off + }) + if i == len(fr.ents) { + i = len(fr.ents) - 1 + } + } + ent := fr.ents[i] + if ent.ChunkOffset > off { + if i == 0 { + return 0, errors.New("internal error; first chunk offset is non-zero") + } + ent = fr.ents[i-1] + } + + // If ent is a chunk of a large file, adjust the ReadAt + // offset by the chunk's offset. + off -= ent.ChunkOffset + + finalEnt := fr.ents[len(fr.ents)-1] + compressedOff := ent.Offset + // compressedBytesRemain is the number of compressed bytes in this + // file remaining, over 1+ chunks. 
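// A sketch combining VerifyTOC, Lookup, OpenFile and ChunkEntryForOffset as
// documented above, assuming imports of this package as
// "github.com/containerd/stargz-snapshotter/estargz", of
// digest "github.com/opencontainers/go-digest", and of "fmt" and "io"; the
// expected TOC digest is assumed to come from somewhere out-of-band (e.g. an
// image annotation) and the entry name is a hypothetical placeholder.
func readVerified(r *estargz.Reader, wantTOCDigest digest.Digest, name string) ([]byte, error) {
	// Check the TOC JSON against the expected digest and obtain per-chunk
	// verifiers.
	v, err := r.VerifyTOC(wantTOCDigest)
	if err != nil {
		return nil, err
	}
	ent, ok := r.Lookup(name)
	if !ok {
		return nil, fmt.Errorf("%q not found in TOC", name)
	}
	sr, err := r.OpenFile(name)
	if err != nil {
		return nil, err
	}
	buf := make([]byte, ent.Size)
	if _, err := io.ReadFull(sr, buf); err != nil {
		return nil, err
	}
	// Verify the chunk containing offset 0 of the file (empty files have no
	// chunk entry and are skipped).
	if ce, ok := r.ChunkEntryForOffset(name, 0); ok {
		cv, err := v.Verifier(ce)
		if err != nil {
			return nil, err
		}
		if _, err := cv.Write(buf[ce.ChunkOffset : ce.ChunkOffset+ce.ChunkSize]); err != nil {
			return nil, err
		}
		if !cv.Verified() {
			return nil, fmt.Errorf("chunk at offset 0 of %q failed verification", name)
		}
	}
	return buf, nil
}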
+ compressedBytesRemain := finalEnt.NextOffset() - compressedOff + + sr := io.NewSectionReader(fr.r.sr, compressedOff, compressedBytesRemain) + + const maxRead = 2 << 20 + var bufSize = maxRead + if compressedBytesRemain < maxRead { + bufSize = int(compressedBytesRemain) + } + + br := bufio.NewReaderSize(sr, bufSize) + if _, err := br.Peek(bufSize); err != nil { + return 0, fmt.Errorf("fileReader.ReadAt.peek: %v", err) + } + + dr, err := fr.r.decompressor.Reader(br) + if err != nil { + return 0, fmt.Errorf("fileReader.ReadAt.decompressor.Reader: %v", err) + } + defer dr.Close() + + if fr.preRead == nil { + if n, err := io.CopyN(io.Discard, dr, ent.InnerOffset+off); n != ent.InnerOffset+off || err != nil { + return 0, fmt.Errorf("discard of %d bytes != %v, %v", ent.InnerOffset+off, n, err) + } + return io.ReadFull(dr, p) + } + + var retN int + var retErr error + var found bool + var nr int64 + for _, e := range fr.r.toc.Entries[ent.chunkTopIndex:] { + if !e.isDataType() { + continue + } + if e.Offset != fr.r.toc.Entries[ent.chunkTopIndex].Offset { + break + } + if in, err := io.CopyN(io.Discard, dr, e.InnerOffset-nr); err != nil || in != e.InnerOffset-nr { + return 0, fmt.Errorf("discard of remaining %d bytes != %v, %v", e.InnerOffset-nr, in, err) + } + nr = e.InnerOffset + if e == ent { + found = true + if n, err := io.CopyN(io.Discard, dr, off); n != off || err != nil { + return 0, fmt.Errorf("discard of offset %d bytes != %v, %v", off, n, err) + } + retN, retErr = io.ReadFull(dr, p) + nr += off + int64(retN) + continue + } + cr := &countReader{r: io.LimitReader(dr, e.ChunkSize)} + if err := fr.preRead(e, cr); err != nil { + return 0, fmt.Errorf("failed to pre read: %w", err) + } + nr += cr.n + } + if !found { + return 0, fmt.Errorf("fileReader.ReadAt: target entry not found") + } + return retN, retErr +} + +// A Writer writes stargz files. +// +// Use NewWriter to create a new Writer. +type Writer struct { + bw *bufio.Writer + cw *countWriter + toc *JTOC + diffHash hash.Hash // SHA-256 of uncompressed tar + + closed bool + gz io.WriteCloser + lastUsername map[int]string + lastGroupname map[int]string + compressor Compressor + + uncompressedCounter *countWriteFlusher + + // ChunkSize optionally controls the maximum number of bytes + // of data of a regular file that can be written in one gzip + // stream before a new gzip stream is started. + // Zero means to use a default, currently 4 MiB. + ChunkSize int + + // MinChunkSize optionally controls the minimum number of bytes + // of data must be written in one gzip stream before a new gzip + // NOTE: This adds a TOC property that stargz snapshotter < v0.13.0 doesn't understand. + MinChunkSize int + + needsOpenGzEntries map[string]struct{} +} + +// currentCompressionWriter writes to the current w.gz field, which can +// change throughout writing a tar entry. +// +// Additionally, it updates w's SHA-256 of the uncompressed bytes +// of the tar file. +type currentCompressionWriter struct{ w *Writer } + +func (ccw currentCompressionWriter) Write(p []byte) (int, error) { + ccw.w.diffHash.Write(p) + if ccw.w.gz == nil { + if err := ccw.w.condOpenGz(); err != nil { + return 0, err + } + } + return ccw.w.gz.Write(p) +} + +func (w *Writer) chunkSize() int { + if w.ChunkSize <= 0 { + return 4 << 20 + } + return w.ChunkSize +} + +// Unpack decompresses the given estargz blob and returns a ReadCloser of the tar blob. +// TOC JSON and footer are removed. 
+func Unpack(sr *io.SectionReader, c Decompressor) (io.ReadCloser, error) { + footerSize := c.FooterSize() + if sr.Size() < footerSize { + return nil, fmt.Errorf("blob is too small; %d < %d", sr.Size(), footerSize) + } + footerOffset := sr.Size() - footerSize + footer := make([]byte, footerSize) + if _, err := sr.ReadAt(footer, footerOffset); err != nil { + return nil, err + } + blobPayloadSize, _, _, err := c.ParseFooter(footer) + if err != nil { + return nil, fmt.Errorf("failed to parse footer: %w", err) + } + if blobPayloadSize < 0 { + blobPayloadSize = sr.Size() + } + return c.Reader(io.LimitReader(sr, blobPayloadSize)) +} + +// NewWriter returns a new stargz writer (gzip-based) writing to w. +// +// The writer must be closed to write its trailing table of contents. +func NewWriter(w io.Writer) *Writer { + return NewWriterLevel(w, gzip.BestCompression) +} + +// NewWriterLevel returns a new stargz writer (gzip-based) writing to w. +// The compression level is configurable. +// +// The writer must be closed to write its trailing table of contents. +func NewWriterLevel(w io.Writer, compressionLevel int) *Writer { + return NewWriterWithCompressor(w, NewGzipCompressorWithLevel(compressionLevel)) +} + +// NewWriterWithCompressor returns a new stargz writer writing to w. +// The compression method is configurable. +// +// The writer must be closed to write its trailing table of contents. +func NewWriterWithCompressor(w io.Writer, c Compressor) *Writer { + bw := bufio.NewWriter(w) + cw := &countWriter{w: bw} + return &Writer{ + bw: bw, + cw: cw, + toc: &JTOC{Version: 1}, + diffHash: sha256.New(), + compressor: c, + uncompressedCounter: &countWriteFlusher{}, + } +} + +// Close writes the stargz's table of contents and flushes all the +// buffers, returning any error. +func (w *Writer) Close() (digest.Digest, error) { + if w.closed { + return "", nil + } + defer func() { w.closed = true }() + + if err := w.closeGz(); err != nil { + return "", err + } + + // Write the TOC index and footer. + tocDigest, err := w.compressor.WriteTOCAndFooter(w.cw, w.cw.n, w.toc, w.diffHash) + if err != nil { + return "", err + } + if err := w.bw.Flush(); err != nil { + return "", err + } + + return tocDigest, nil +} + +func (w *Writer) closeGz() error { + if w.closed { + return errors.New("write on closed Writer") + } + if w.gz != nil { + if err := w.gz.Close(); err != nil { + return err + } + w.gz = nil + } + return nil +} + +func (w *Writer) flushGz() error { + if w.closed { + return errors.New("flush on closed Writer") + } + if w.gz != nil { + if f, ok := w.gz.(interface { + Flush() error + }); ok { + return f.Flush() + } + } + return nil +} + +// nameIfChanged returns name, unless it was the already the value of (*mp)[id], +// in which case it returns the empty string. +func (w *Writer) nameIfChanged(mp *map[int]string, id int, name string) string { + if name == "" { + return "" + } + if *mp == nil { + *mp = make(map[int]string) + } + if (*mp)[id] == name { + return "" + } + (*mp)[id] = name + return name +} + +func (w *Writer) condOpenGz() (err error) { + if w.gz == nil { + w.gz, err = w.compressor.Writer(w.cw) + if w.gz != nil { + w.gz = w.uncompressedCounter.register(w.gz) + } + } + return +} + +// AppendTar reads the tar or tar.gz file from r and appends +// each of its contents to w. +// +// The input r can optionally be gzip compressed but the output will +// always be compressed by the specified compressor. 
+func (w *Writer) AppendTar(r io.Reader) error { + return w.appendTar(r, false) +} + +// AppendTarLossLess reads the tar or tar.gz file from r and appends +// each of its contents to w. +// +// The input r can optionally be gzip compressed but the output will +// always be compressed by the specified compressor. +// +// The difference of this func with AppendTar is that this writes +// the input tar stream into w without any modification (e.g. to header bytes). +// +// Note that if the input tar stream already contains TOC JSON, this returns +// error because w cannot overwrite the TOC JSON to the one generated by w without +// lossy modification. To avoid this error, if the input stream is known to be stargz/estargz, +// you shoud decompress it and remove TOC JSON in advance. +func (w *Writer) AppendTarLossLess(r io.Reader) error { + return w.appendTar(r, true) +} + +func (w *Writer) appendTar(r io.Reader, lossless bool) error { + var src io.Reader + br := bufio.NewReader(r) + if isGzip(br) { + zr, _ := gzip.NewReader(br) + src = zr + } else { + src = io.Reader(br) + } + dst := currentCompressionWriter{w} + var tw *tar.Writer + if !lossless { + tw = tar.NewWriter(dst) // use tar writer only when this isn't lossless mode. + } + tr := tar.NewReader(src) + if lossless { + tr.RawAccounting = true + } + prevOffset := w.cw.n + var prevOffsetUncompressed int64 + for { + h, err := tr.Next() + if err == io.EOF { + if lossless { + if remain := tr.RawBytes(); len(remain) > 0 { + // Collect the remaining null bytes. + // https://github.com/vbatts/tar-split/blob/80a436fd6164c557b131f7c59ed69bd81af69761/concept/main.go#L49-L53 + if _, err := dst.Write(remain); err != nil { + return err + } + } + } + break + } + if err != nil { + return fmt.Errorf("error reading from source tar: tar.Reader.Next: %v", err) + } + if cleanEntryName(h.Name) == TOCTarName { + // It is possible for a layer to be "stargzified" twice during the + // distribution lifecycle. So we reserve "TOCTarName" here to avoid + // duplicated entries in the resulting layer. + if lossless { + // We cannot handle this in lossless way. + return fmt.Errorf("existing TOC JSON is not allowed; decompress layer before append") + } + continue + } + + xattrs := make(map[string][]byte) + const xattrPAXRecordsPrefix = "SCHILY.xattr." 
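// A minimal round-trip sketch for the Writer and Unpack APIs above, assuming
// imports of this package as
// "github.com/containerd/stargz-snapshotter/estargz", of
// digest "github.com/opencontainers/go-digest", and of "bytes" and "io";
// tarStream is any plain or gzip-compressed tar supplied by the caller.
func toEStargz(tarStream io.Reader) ([]byte, digest.Digest, error) {
	buf := new(bytes.Buffer)
	w := estargz.NewWriter(buf) // gzip at gzip.BestCompression by default
	w.ChunkSize = 1 << 20       // optional: 1MiB chunks instead of the 4MiB default
	if err := w.AppendTar(tarStream); err != nil {
		return nil, "", err
	}
	tocDigest, err := w.Close() // writes the TOC and footer
	if err != nil {
		return nil, "", err
	}
	return buf.Bytes(), tocDigest, nil
}

// toTar recovers the tar payload (TOC JSON and footer stripped) from a
// gzip-based eStargz blob such as the one produced by toEStargz.
func toTar(blob []byte) (io.ReadCloser, error) {
	return estargz.Unpack(
		io.NewSectionReader(bytes.NewReader(blob), 0, int64(len(blob))),
		new(estargz.GzipDecompressor),
	)
}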
+ if h.PAXRecords != nil { + for k, v := range h.PAXRecords { + if strings.HasPrefix(k, xattrPAXRecordsPrefix) { + xattrs[k[len(xattrPAXRecordsPrefix):]] = []byte(v) + } + } + } + ent := &TOCEntry{ + Name: h.Name, + Mode: h.Mode, + UID: h.Uid, + GID: h.Gid, + Uname: w.nameIfChanged(&w.lastUsername, h.Uid, h.Uname), + Gname: w.nameIfChanged(&w.lastGroupname, h.Gid, h.Gname), + ModTime3339: formatModtime(h.ModTime), + Xattrs: xattrs, + } + if err := w.condOpenGz(); err != nil { + return err + } + if tw != nil { + if err := tw.WriteHeader(h); err != nil { + return err + } + } else { + if _, err := dst.Write(tr.RawBytes()); err != nil { + return err + } + } + switch h.Typeflag { + case tar.TypeLink: + ent.Type = "hardlink" + ent.LinkName = h.Linkname + case tar.TypeSymlink: + ent.Type = "symlink" + ent.LinkName = h.Linkname + case tar.TypeDir: + ent.Type = "dir" + case tar.TypeReg: + ent.Type = "reg" + ent.Size = h.Size + case tar.TypeChar: + ent.Type = "char" + ent.DevMajor = int(h.Devmajor) + ent.DevMinor = int(h.Devminor) + case tar.TypeBlock: + ent.Type = "block" + ent.DevMajor = int(h.Devmajor) + ent.DevMinor = int(h.Devminor) + case tar.TypeFifo: + ent.Type = "fifo" + default: + return fmt.Errorf("unsupported input tar entry %q", h.Typeflag) + } + + // We need to keep a reference to the TOC entry for regular files, so that we + // can fill the digest later. + var regFileEntry *TOCEntry + var payloadDigest digest.Digester + if h.Typeflag == tar.TypeReg { + regFileEntry = ent + payloadDigest = digest.Canonical.Digester() + } + + if h.Typeflag == tar.TypeReg && ent.Size > 0 { + var written int64 + totalSize := ent.Size // save it before we destroy ent + tee := io.TeeReader(tr, payloadDigest.Hash()) + for written < totalSize { + chunkSize := int64(w.chunkSize()) + remain := totalSize - written + if remain < chunkSize { + chunkSize = remain + } else { + ent.ChunkSize = chunkSize + } + + // We flush the underlying compression writer here to correctly calculate "w.cw.n". 
+ if err := w.flushGz(); err != nil { + return err + } + if w.needsOpenGz(ent) || w.cw.n-prevOffset >= int64(w.MinChunkSize) { + if err := w.closeGz(); err != nil { + return err + } + ent.Offset = w.cw.n + prevOffset = ent.Offset + prevOffsetUncompressed = w.uncompressedCounter.n + } else { + ent.Offset = prevOffset + ent.InnerOffset = w.uncompressedCounter.n - prevOffsetUncompressed + } + + ent.ChunkOffset = written + chunkDigest := digest.Canonical.Digester() + + if err := w.condOpenGz(); err != nil { + return err + } + + teeChunk := io.TeeReader(tee, chunkDigest.Hash()) + var out io.Writer + if tw != nil { + out = tw + } else { + out = dst + } + if _, err := io.CopyN(out, teeChunk, chunkSize); err != nil { + return fmt.Errorf("error copying %q: %v", h.Name, err) + } + ent.ChunkDigest = chunkDigest.Digest().String() + w.toc.Entries = append(w.toc.Entries, ent) + written += chunkSize + ent = &TOCEntry{ + Name: h.Name, + Type: "chunk", + } + } + } else { + w.toc.Entries = append(w.toc.Entries, ent) + } + if payloadDigest != nil { + regFileEntry.Digest = payloadDigest.Digest().String() + } + if tw != nil { + if err := tw.Flush(); err != nil { + return err + } + } + } + remainDest := io.Discard + if lossless { + remainDest = dst // Preserve the remaining bytes in lossless mode + } + _, err := io.Copy(remainDest, src) + return err +} + +func (w *Writer) needsOpenGz(ent *TOCEntry) bool { + if ent.Type != "reg" { + return false + } + if w.needsOpenGzEntries == nil { + return false + } + _, ok := w.needsOpenGzEntries[ent.Name] + return ok +} + +// DiffID returns the SHA-256 of the uncompressed tar bytes. +// It is only valid to call DiffID after Close. +func (w *Writer) DiffID() string { + return fmt.Sprintf("sha256:%x", w.diffHash.Sum(nil)) +} + +func maxFooterSize(blobSize int64, decompressors ...Decompressor) (res int64) { + for _, d := range decompressors { + if s := d.FooterSize(); res < s && s <= blobSize { + res = s + } + } + return +} + +func parseTOC(d Decompressor, sr *io.SectionReader, tocOff, tocSize int64, tocBytes []byte, opts openOpts) (*Reader, error) { + if tocOff < 0 { + // This means that TOC isn't contained in the blob. + // We pass nil reader to ParseTOC and expect that ParseTOC acquire TOC from + // the external location. 
+ start := time.Now() + toc, tocDgst, err := d.ParseTOC(nil) + if err != nil { + return nil, err + } + if opts.telemetry != nil && opts.telemetry.GetTocLatency != nil { + opts.telemetry.GetTocLatency(start) + } + if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil { + opts.telemetry.DeserializeTocLatency(start) + } + return &Reader{ + sr: sr, + toc: toc, + tocDigest: tocDgst, + decompressor: d, + }, nil + } + if len(tocBytes) > 0 { + start := time.Now() + toc, tocDgst, err := d.ParseTOC(bytes.NewReader(tocBytes)) + if err == nil { + if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil { + opts.telemetry.DeserializeTocLatency(start) + } + return &Reader{ + sr: sr, + toc: toc, + tocDigest: tocDgst, + decompressor: d, + }, nil + } + } + + start := time.Now() + tocBytes = make([]byte, tocSize) + if _, err := sr.ReadAt(tocBytes, tocOff); err != nil { + return nil, fmt.Errorf("error reading %d byte TOC targz: %v", len(tocBytes), err) + } + if opts.telemetry != nil && opts.telemetry.GetTocLatency != nil { + opts.telemetry.GetTocLatency(start) + } + start = time.Now() + toc, tocDgst, err := d.ParseTOC(bytes.NewReader(tocBytes)) + if err != nil { + return nil, err + } + if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil { + opts.telemetry.DeserializeTocLatency(start) + } + return &Reader{ + sr: sr, + toc: toc, + tocDigest: tocDgst, + decompressor: d, + }, nil +} + +func formatModtime(t time.Time) string { + if t.IsZero() || t.Unix() == 0 { + return "" + } + return t.UTC().Round(time.Second).Format(time.RFC3339) +} + +func cleanEntryName(name string) string { + // Use path.Clean to consistently deal with path separators across platforms. + return strings.TrimPrefix(path.Clean("/"+name), "/") +} + +// countWriter counts how many bytes have been written to its wrapped +// io.Writer. +type countWriter struct { + w io.Writer + n int64 +} + +func (cw *countWriter) Write(p []byte) (n int, err error) { + n, err = cw.w.Write(p) + cw.n += int64(n) + return +} + +type countWriteFlusher struct { + io.WriteCloser + n int64 +} + +func (wc *countWriteFlusher) register(w io.WriteCloser) io.WriteCloser { + wc.WriteCloser = w + return wc +} + +func (wc *countWriteFlusher) Write(p []byte) (n int, err error) { + n, err = wc.WriteCloser.Write(p) + wc.n += int64(n) + return +} + +func (wc *countWriteFlusher) Flush() error { + if f, ok := wc.WriteCloser.(interface { + Flush() error + }); ok { + return f.Flush() + } + return nil +} + +func (wc *countWriteFlusher) Close() error { + err := wc.WriteCloser.Close() + wc.WriteCloser = nil + return err +} + +// isGzip reports whether br is positioned right before an upcoming gzip stream. +// It does not consume any bytes from br. +func isGzip(br *bufio.Reader) bool { + const ( + gzipID1 = 0x1f + gzipID2 = 0x8b + gzipDeflate = 8 + ) + peek, _ := br.Peek(3) + return len(peek) >= 3 && peek[0] == gzipID1 && peek[1] == gzipID2 && peek[2] == gzipDeflate +} + +func positive(n int64) int64 { + if n < 0 { + return 0 + } + return n +} + +type countReader struct { + r io.Reader + n int64 +} + +func (cr *countReader) Read(p []byte) (n int, err error) { + n, err = cr.r.Read(p) + cr.n += int64(n) + return +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go new file mode 100644 index 00000000..f24afe32 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go @@ -0,0 +1,237 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Copyright 2019 The Go Authors. All rights reserved. + Use of this source code is governed by a BSD-style + license that can be found in the LICENSE file. +*/ + +package estargz + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "encoding/binary" + "encoding/json" + "fmt" + "hash" + "io" + "strconv" + + digest "github.com/opencontainers/go-digest" +) + +type gzipCompression struct { + *GzipCompressor + *GzipDecompressor +} + +func newGzipCompressionWithLevel(level int) Compression { + return &gzipCompression{ + &GzipCompressor{level}, + &GzipDecompressor{}, + } +} + +func NewGzipCompressor() *GzipCompressor { + return &GzipCompressor{gzip.BestCompression} +} + +func NewGzipCompressorWithLevel(level int) *GzipCompressor { + return &GzipCompressor{level} +} + +type GzipCompressor struct { + compressionLevel int +} + +func (gc *GzipCompressor) Writer(w io.Writer) (WriteFlushCloser, error) { + return gzip.NewWriterLevel(w, gc.compressionLevel) +} + +func (gc *GzipCompressor) WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (digest.Digest, error) { + tocJSON, err := json.MarshalIndent(toc, "", "\t") + if err != nil { + return "", err + } + gz, _ := gzip.NewWriterLevel(w, gc.compressionLevel) + gw := io.Writer(gz) + if diffHash != nil { + gw = io.MultiWriter(gz, diffHash) + } + tw := tar.NewWriter(gw) + if err := tw.WriteHeader(&tar.Header{ + Typeflag: tar.TypeReg, + Name: TOCTarName, + Size: int64(len(tocJSON)), + }); err != nil { + return "", err + } + if _, err := tw.Write(tocJSON); err != nil { + return "", err + } + + if err := tw.Close(); err != nil { + return "", err + } + if err := gz.Close(); err != nil { + return "", err + } + if _, err := w.Write(gzipFooterBytes(off)); err != nil { + return "", err + } + return digest.FromBytes(tocJSON), nil +} + +// gzipFooterBytes returns the 51 bytes footer. +func gzipFooterBytes(tocOff int64) []byte { + buf := bytes.NewBuffer(make([]byte, 0, FooterSize)) + gz, _ := gzip.NewWriterLevel(buf, gzip.NoCompression) // MUST be NoCompression to keep 51 bytes + + // Extra header indicating the offset of TOCJSON + // https://tools.ietf.org/html/rfc1952#section-2.3.1.1 + header := make([]byte, 4) + header[0], header[1] = 'S', 'G' + subfield := fmt.Sprintf("%016xSTARGZ", tocOff) + binary.LittleEndian.PutUint16(header[2:4], uint16(len(subfield))) // little-endian per RFC1952 + gz.Header.Extra = append(header, []byte(subfield)...) 
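// A small sketch of reading the footer written above to locate the TOC,
// using the GzipDecompressor defined below and assuming imports of this
// package as "github.com/containerd/stargz-snapshotter/estargz" and of "fmt";
// blob is a complete gzip-based eStargz blob held in memory by the caller.
func tocOffsetOf(blob []byte) (int64, error) {
	d := new(estargz.GzipDecompressor)
	fSize := int(d.FooterSize()) // 51 bytes for the gzip-based footer
	if len(blob) < fSize {
		return 0, fmt.Errorf("blob too small: %d < %d", len(blob), fSize)
	}
	// blobPayloadSize and tocSize are ignored here; a tocSize <= 0 means
	// "everything up to the footer".
	_, tocOffset, _, err := d.ParseFooter(blob[len(blob)-fSize:])
	if err != nil {
		return 0, err
	}
	return tocOffset, nil
}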
+ gz.Close() + if buf.Len() != FooterSize { + panic(fmt.Sprintf("footer buffer = %d, not %d", buf.Len(), FooterSize)) + } + return buf.Bytes() +} + +type GzipDecompressor struct{} + +func (gz *GzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) { + return gzip.NewReader(r) +} + +func (gz *GzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) { + return parseTOCEStargz(r) +} + +func (gz *GzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) { + if len(p) != FooterSize { + return 0, 0, 0, fmt.Errorf("invalid length %d cannot be parsed", len(p)) + } + zr, err := gzip.NewReader(bytes.NewReader(p)) + if err != nil { + return 0, 0, 0, err + } + defer zr.Close() + extra := zr.Header.Extra + si1, si2, subfieldlen, subfield := extra[0], extra[1], extra[2:4], extra[4:] + if si1 != 'S' || si2 != 'G' { + return 0, 0, 0, fmt.Errorf("invalid subfield IDs: %q, %q; want E, S", si1, si2) + } + if slen := binary.LittleEndian.Uint16(subfieldlen); slen != uint16(16+len("STARGZ")) { + return 0, 0, 0, fmt.Errorf("invalid length of subfield %d; want %d", slen, 16+len("STARGZ")) + } + if string(subfield[16:]) != "STARGZ" { + return 0, 0, 0, fmt.Errorf("STARGZ magic string must be included in the footer subfield") + } + tocOffset, err = strconv.ParseInt(string(subfield[:16]), 16, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("legacy: failed to parse toc offset: %w", err) + } + return tocOffset, tocOffset, 0, nil +} + +func (gz *GzipDecompressor) FooterSize() int64 { + return FooterSize +} + +func (gz *GzipDecompressor) DecompressTOC(r io.Reader) (tocJSON io.ReadCloser, err error) { + return decompressTOCEStargz(r) +} + +type LegacyGzipDecompressor struct{} + +func (gz *LegacyGzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) { + return gzip.NewReader(r) +} + +func (gz *LegacyGzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) { + return parseTOCEStargz(r) +} + +func (gz *LegacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) { + if len(p) != legacyFooterSize { + return 0, 0, 0, fmt.Errorf("legacy: invalid length %d cannot be parsed", len(p)) + } + zr, err := gzip.NewReader(bytes.NewReader(p)) + if err != nil { + return 0, 0, 0, fmt.Errorf("legacy: failed to get footer gzip reader: %w", err) + } + defer zr.Close() + extra := zr.Header.Extra + if len(extra) != 16+len("STARGZ") { + return 0, 0, 0, fmt.Errorf("legacy: invalid stargz's extra field size") + } + if string(extra[16:]) != "STARGZ" { + return 0, 0, 0, fmt.Errorf("legacy: magic string STARGZ not found") + } + tocOffset, err = strconv.ParseInt(string(extra[:16]), 16, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("legacy: failed to parse toc offset: %w", err) + } + return tocOffset, tocOffset, 0, nil +} + +func (gz *LegacyGzipDecompressor) FooterSize() int64 { + return legacyFooterSize +} + +func (gz *LegacyGzipDecompressor) DecompressTOC(r io.Reader) (tocJSON io.ReadCloser, err error) { + return decompressTOCEStargz(r) +} + +func parseTOCEStargz(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) { + tr, err := decompressTOCEStargz(r) + if err != nil { + return nil, "", err + } + dgstr := digest.Canonical.Digester() + toc = new(JTOC) + if err := json.NewDecoder(io.TeeReader(tr, dgstr.Hash())).Decode(&toc); err != nil { + return nil, "", fmt.Errorf("error decoding TOC JSON: %v", err) + } + if err := tr.Close(); err != nil { + return nil, "", err + } + return toc, 
dgstr.Digest(), nil +} + +func decompressTOCEStargz(r io.Reader) (tocJSON io.ReadCloser, err error) { + zr, err := gzip.NewReader(r) + if err != nil { + return nil, fmt.Errorf("malformed TOC gzip header: %v", err) + } + zr.Multistream(false) + tr := tar.NewReader(zr) + h, err := tr.Next() + if err != nil { + return nil, fmt.Errorf("failed to find tar header in TOC gzip stream: %v", err) + } + if h.Name != TOCTarName { + return nil, fmt.Errorf("TOC tar entry had name %q; expected %q", h.Name, TOCTarName) + } + return readCloser{tr, zr.Close}, nil +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go new file mode 100644 index 00000000..0ca6fd75 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go @@ -0,0 +1,2366 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Copyright 2019 The Go Authors. All rights reserved. + Use of this source code is governed by a BSD-style + license that can be found in the LICENSE file. +*/ + +package estargz + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "crypto/sha256" + "encoding/json" + "errors" + "fmt" + "io" + "math/rand" + "os" + "path/filepath" + "reflect" + "sort" + "strings" + "testing" + "time" + + "github.com/containerd/stargz-snapshotter/estargz/errorutil" + "github.com/klauspost/compress/zstd" + digest "github.com/opencontainers/go-digest" +) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +// TestingController is Compression with some helper methods necessary for testing. +type TestingController interface { + Compression + TestStreams(t *testing.T, b []byte, streams []int64) + DiffIDOf(*testing.T, []byte) string + String() string +} + +// CompressionTestSuite tests this pkg with controllers can build valid eStargz blobs and parse them. +func CompressionTestSuite(t *testing.T, controllers ...TestingControllerFactory) { + t.Run("testBuild", func(t *testing.T) { t.Parallel(); testBuild(t, controllers...) }) + t.Run("testDigestAndVerify", func(t *testing.T) { t.Parallel(); testDigestAndVerify(t, controllers...) }) + t.Run("testWriteAndOpen", func(t *testing.T) { t.Parallel(); testWriteAndOpen(t, controllers...) }) +} + +type TestingControllerFactory func() TestingController + +const ( + uncompressedType int = iota + gzipType + zstdType +) + +var srcCompressions = []int{ + uncompressedType, + gzipType, + zstdType, +} + +var allowedPrefix = [4]string{"", "./", "/", "../"} + +// testBuild tests the resulting stargz blob built by this pkg has the same +// contents as the normal stargz blob. 
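// A sketch of how an external Compression implementation might hook into the
// CompressionTestSuite above, assuming a hypothetical newMyController
// constructor that returns a TestingController, and imports of this package
// as "github.com/containerd/stargz-snapshotter/estargz" and of "testing".
func TestMyCompressionEStargz(t *testing.T) {
	estargz.CompressionTestSuite(t,
		func() estargz.TestingController { return newMyController() }, // hypothetical factory
	)
}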
+func testBuild(t *testing.T, controllers ...TestingControllerFactory) { + tests := []struct { + name string + chunkSize int + minChunkSize []int + in []tarEntry + }{ + { + name: "regfiles and directories", + chunkSize: 4, + in: tarOf( + file("foo", "test1"), + dir("foo2/"), + file("foo2/bar", "test2", xAttr(map[string]string{"test": "sample"})), + ), + }, + { + name: "empty files", + chunkSize: 4, + in: tarOf( + file("foo", "tttttt"), + file("foo_empty", ""), + file("foo2", "tttttt"), + file("foo_empty2", ""), + file("foo3", "tttttt"), + file("foo_empty3", ""), + file("foo4", "tttttt"), + file("foo_empty4", ""), + file("foo5", "tttttt"), + file("foo_empty5", ""), + file("foo6", "tttttt"), + ), + }, + { + name: "various files", + chunkSize: 4, + minChunkSize: []int{0, 64000}, + in: tarOf( + file("baz.txt", "bazbazbazbazbazbazbaz"), + file("foo1.txt", "a"), + file("bar/foo2.txt", "b"), + file("foo3.txt", "c"), + symlink("barlink", "test/bar.txt"), + dir("test/"), + dir("dev/"), + blockdev("dev/testblock", 3, 4), + fifo("dev/testfifo"), + chardev("dev/testchar1", 5, 6), + file("test/bar.txt", "testbartestbar", xAttr(map[string]string{"test2": "sample2"})), + dir("test2/"), + link("test2/bazlink", "baz.txt"), + chardev("dev/testchar2", 1, 2), + ), + }, + { + name: "no contents", + chunkSize: 4, + in: tarOf( + file("baz.txt", ""), + symlink("barlink", "test/bar.txt"), + dir("test/"), + dir("dev/"), + blockdev("dev/testblock", 3, 4), + fifo("dev/testfifo"), + chardev("dev/testchar1", 5, 6), + file("test/bar.txt", "", xAttr(map[string]string{"test2": "sample2"})), + dir("test2/"), + link("test2/bazlink", "baz.txt"), + chardev("dev/testchar2", 1, 2), + ), + }, + } + for _, tt := range tests { + if len(tt.minChunkSize) == 0 { + tt.minChunkSize = []int{0} + } + for _, srcCompression := range srcCompressions { + srcCompression := srcCompression + for _, newCL := range controllers { + newCL := newCL + for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} { + srcTarFormat := srcTarFormat + for _, prefix := range allowedPrefix { + prefix := prefix + for _, minChunkSize := range tt.minChunkSize { + minChunkSize := minChunkSize + t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s,minChunkSize=%d", newCL(), prefix, srcCompression, srcTarFormat, minChunkSize), func(t *testing.T) { + tarBlob := buildTar(t, tt.in, prefix, srcTarFormat) + // Test divideEntries() + entries, err := sortEntries(tarBlob, nil, nil) // identical order + if err != nil { + t.Fatalf("failed to parse tar: %v", err) + } + var merged []*entry + for _, part := range divideEntries(entries, 4) { + merged = append(merged, part...) 
+ } + if !reflect.DeepEqual(entries, merged) { + for _, e := range entries { + t.Logf("Original: %v", e.header) + } + for _, e := range merged { + t.Logf("Merged: %v", e.header) + } + t.Errorf("divided entries couldn't be merged") + return + } + + // Prepare sample data + cl1 := newCL() + wantBuf := new(bytes.Buffer) + sw := NewWriterWithCompressor(wantBuf, cl1) + sw.MinChunkSize = minChunkSize + sw.ChunkSize = tt.chunkSize + if err := sw.AppendTar(tarBlob); err != nil { + t.Fatalf("failed to append tar to want stargz: %v", err) + } + if _, err := sw.Close(); err != nil { + t.Fatalf("failed to prepare want stargz: %v", err) + } + wantData := wantBuf.Bytes() + want, err := Open(io.NewSectionReader( + bytes.NewReader(wantData), 0, int64(len(wantData))), + WithDecompressors(cl1), + ) + if err != nil { + t.Fatalf("failed to parse the want stargz: %v", err) + } + + // Prepare testing data + var opts []Option + if minChunkSize > 0 { + opts = append(opts, WithMinChunkSize(minChunkSize)) + } + cl2 := newCL() + rc, err := Build(compressBlob(t, tarBlob, srcCompression), + append(opts, WithChunkSize(tt.chunkSize), WithCompression(cl2))...) + if err != nil { + t.Fatalf("failed to build stargz: %v", err) + } + defer rc.Close() + gotBuf := new(bytes.Buffer) + if _, err := io.Copy(gotBuf, rc); err != nil { + t.Fatalf("failed to copy built stargz blob: %v", err) + } + gotData := gotBuf.Bytes() + got, err := Open(io.NewSectionReader( + bytes.NewReader(gotBuf.Bytes()), 0, int64(len(gotData))), + WithDecompressors(cl2), + ) + if err != nil { + t.Fatalf("failed to parse the got stargz: %v", err) + } + + // Check DiffID is properly calculated + rc.Close() + diffID := rc.DiffID() + wantDiffID := cl2.DiffIDOf(t, gotData) + if diffID.String() != wantDiffID { + t.Errorf("DiffID = %q; want %q", diffID, wantDiffID) + } + + // Compare as stargz + if !isSameVersion(t, cl1, wantData, cl2, gotData) { + t.Errorf("built stargz hasn't same json") + return + } + if !isSameEntries(t, want, got) { + t.Errorf("built stargz isn't same as the original") + return + } + + // Compare as tar.gz + if !isSameTarGz(t, cl1, wantData, cl2, gotData) { + t.Errorf("built stargz isn't same tar.gz") + return + } + }) + } + } + } + } + } + } +} + +func isSameTarGz(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool { + aGz, err := cla.Reader(bytes.NewReader(a)) + if err != nil { + t.Fatalf("failed to read A") + } + defer aGz.Close() + bGz, err := clb.Reader(bytes.NewReader(b)) + if err != nil { + t.Fatalf("failed to read B") + } + defer bGz.Close() + + // Same as tar's Next() method but ignores landmarks and TOCJSON file + next := func(r *tar.Reader) (h *tar.Header, err error) { + for { + if h, err = r.Next(); err != nil { + return + } + if h.Name != PrefetchLandmark && + h.Name != NoPrefetchLandmark && + h.Name != TOCTarName { + return + } + } + } + + aTar := tar.NewReader(aGz) + bTar := tar.NewReader(bGz) + for { + // Fetch and parse next header. 
+ aH, aErr := next(aTar) + bH, bErr := next(bTar) + if aErr != nil || bErr != nil { + if aErr == io.EOF && bErr == io.EOF { + break + } + t.Fatalf("Failed to parse tar file: A: %v, B: %v", aErr, bErr) + } + if !reflect.DeepEqual(aH, bH) { + t.Logf("different header (A = %v; B = %v)", aH, bH) + return false + + } + aFile, err := io.ReadAll(aTar) + if err != nil { + t.Fatal("failed to read tar payload of A") + } + bFile, err := io.ReadAll(bTar) + if err != nil { + t.Fatal("failed to read tar payload of B") + } + if !bytes.Equal(aFile, bFile) { + t.Logf("different tar payload (A = %q; B = %q)", string(a), string(b)) + return false + } + } + + return true +} + +func isSameVersion(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool { + aJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(a), 0, int64(len(a))), cla) + if err != nil { + t.Fatalf("failed to parse A: %v", err) + } + bJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), clb) + if err != nil { + t.Fatalf("failed to parse B: %v", err) + } + t.Logf("A: TOCJSON: %v", dumpTOCJSON(t, aJTOC)) + t.Logf("B: TOCJSON: %v", dumpTOCJSON(t, bJTOC)) + return aJTOC.Version == bJTOC.Version +} + +func isSameEntries(t *testing.T, a, b *Reader) bool { + aroot, ok := a.Lookup("") + if !ok { + t.Fatalf("failed to get root of A") + } + broot, ok := b.Lookup("") + if !ok { + t.Fatalf("failed to get root of B") + } + aEntry := stargzEntry{aroot, a} + bEntry := stargzEntry{broot, b} + return contains(t, aEntry, bEntry) && contains(t, bEntry, aEntry) +} + +func compressBlob(t *testing.T, src *io.SectionReader, srcCompression int) *io.SectionReader { + buf := new(bytes.Buffer) + var w io.WriteCloser + var err error + if srcCompression == gzipType { + w = gzip.NewWriter(buf) + } else if srcCompression == zstdType { + w, err = zstd.NewWriter(buf) + if err != nil { + t.Fatalf("failed to init zstd writer: %v", err) + } + } else { + return src + } + src.Seek(0, io.SeekStart) + if _, err := io.Copy(w, src); err != nil { + t.Fatalf("failed to compress source") + } + if err := w.Close(); err != nil { + t.Fatalf("failed to finalize compress source") + } + data := buf.Bytes() + return io.NewSectionReader(bytes.NewReader(data), 0, int64(len(data))) + +} + +type stargzEntry struct { + e *TOCEntry + r *Reader +} + +// contains checks if all child entries in "b" are also contained in "a". +// This function also checks if the files/chunks contain the same contents among "a" and "b". +func contains(t *testing.T, a, b stargzEntry) bool { + ae, ar := a.e, a.r + be, br := b.e, b.r + t.Logf("Comparing: %q vs %q", ae.Name, be.Name) + if !equalEntry(ae, be) { + t.Logf("%q != %q: entry: a: %v, b: %v", ae.Name, be.Name, ae, be) + return false + } + if ae.Type == "dir" { + t.Logf("Directory: %q vs %q: %v vs %v", ae.Name, be.Name, + allChildrenName(ae), allChildrenName(be)) + iscontain := true + ae.ForeachChild(func(aBaseName string, aChild *TOCEntry) bool { + // Walk through all files on this stargz file. + + if aChild.Name == PrefetchLandmark || + aChild.Name == NoPrefetchLandmark { + return true // Ignore landmarks + } + + // Ignore a TOCEntry of "./" (formated as "" by stargz lib) on root directory + // because this points to the root directory itself. 
+ if aChild.Name == "" && ae.Name == "" {
+ return true
+ }
+
+ bChild, ok := be.LookupChild(aBaseName)
+ if !ok {
+ t.Logf("%q (base: %q): not found in b: %v",
+ ae.Name, aBaseName, allChildrenName(be))
+ iscontain = false
+ return false
+ }
+
+ childcontain := contains(t, stargzEntry{aChild, a.r}, stargzEntry{bChild, b.r})
+ if !childcontain {
+ t.Logf("%q != %q: non-equal dir", ae.Name, be.Name)
+ iscontain = false
+ return false
+ }
+ return true
+ })
+ return iscontain
+ } else if ae.Type == "reg" {
+ af, err := ar.OpenFile(ae.Name)
+ if err != nil {
+ t.Fatalf("failed to open file %q on A: %v", ae.Name, err)
+ }
+ bf, err := br.OpenFile(be.Name)
+ if err != nil {
+ t.Fatalf("failed to open file %q on B: %v", be.Name, err)
+ }
+
+ var nr int64
+ for nr < ae.Size {
+ abytes, anext, aok := readOffset(t, af, nr, a)
+ bbytes, bnext, bok := readOffset(t, bf, nr, b)
+ if !aok && !bok {
+ break
+ } else if !(aok && bok) || anext != bnext {
+ t.Logf("%q != %q (offset=%d): chunk existence a=%v vs b=%v, anext=%v vs bnext=%v",
+ ae.Name, be.Name, nr, aok, bok, anext, bnext)
+ return false
+ }
+ nr = anext
+ if !bytes.Equal(abytes, bbytes) {
+ t.Logf("%q != %q: different contents %v vs %v",
+ ae.Name, be.Name, string(abytes), string(bbytes))
+ return false
+ }
+ }
+ return true
+ }
+
+ return true
+}
+
+func allChildrenName(e *TOCEntry) (children []string) {
+ e.ForeachChild(func(baseName string, _ *TOCEntry) bool {
+ children = append(children, baseName)
+ return true
+ })
+ return
+}
+
+func equalEntry(a, b *TOCEntry) bool {
+ // Here, we selectively compare fields that we are interested in.
+ return a.Name == b.Name &&
+ a.Type == b.Type &&
+ a.Size == b.Size &&
+ a.ModTime3339 == b.ModTime3339 &&
+ a.Stat().ModTime().Equal(b.Stat().ModTime()) && // modTime time.Time
+ a.LinkName == b.LinkName &&
+ a.Mode == b.Mode &&
+ a.UID == b.UID &&
+ a.GID == b.GID &&
+ a.Uname == b.Uname &&
+ a.Gname == b.Gname &&
+ (a.Offset >= 0) == (b.Offset >= 0) &&
+ (a.NextOffset() > 0) == (b.NextOffset() > 0) &&
+ a.DevMajor == b.DevMajor &&
+ a.DevMinor == b.DevMinor &&
+ a.NumLink == b.NumLink &&
+ reflect.DeepEqual(a.Xattrs, b.Xattrs) &&
+ // chunk-related information isn't compared in this function.
+ // ChunkOffset int64 `json:"chunkOffset,omitempty"` + // ChunkSize int64 `json:"chunkSize,omitempty"` + // children map[string]*TOCEntry + a.Digest == b.Digest +} + +func readOffset(t *testing.T, r *io.SectionReader, offset int64, e stargzEntry) ([]byte, int64, bool) { + ce, ok := e.r.ChunkEntryForOffset(e.e.Name, offset) + if !ok { + return nil, 0, false + } + data := make([]byte, ce.ChunkSize) + t.Logf("Offset: %v, NextOffset: %v", ce.Offset, ce.NextOffset()) + n, err := r.ReadAt(data, ce.ChunkOffset) + if err != nil { + t.Fatalf("failed to read file payload of %q (offset:%d,size:%d): %v", + e.e.Name, ce.ChunkOffset, ce.ChunkSize, err) + } + if int64(n) != ce.ChunkSize { + t.Fatalf("unexpected copied data size %d; want %d", + n, ce.ChunkSize) + } + return data[:n], offset + ce.ChunkSize, true +} + +func dumpTOCJSON(t *testing.T, tocJSON *JTOC) string { + jtocData, err := json.Marshal(*tocJSON) + if err != nil { + t.Fatalf("failed to marshal TOC JSON: %v", err) + } + buf := new(bytes.Buffer) + if _, err := io.Copy(buf, bytes.NewReader(jtocData)); err != nil { + t.Fatalf("failed to read toc json blob: %v", err) + } + return buf.String() +} + +const chunkSize = 3 + +// type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, compressionLevel int) +type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) + +// testDigestAndVerify runs specified checks against sample stargz blobs. +func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory) { + tests := []struct { + name string + tarInit func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) + checks []check + minChunkSize []int + }{ + { + name: "no-regfile", + tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + return tarOf( + dir("test/"), + ) + }, + checks: []check{ + checkStargzTOC, + checkVerifyTOC, + checkVerifyInvalidStargzFail(buildTar(t, tarOf( + dir("test2/"), // modified + ), allowedPrefix[0])), + }, + }, + { + name: "small-files", + tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + return tarOf( + regDigest(t, "baz.txt", "", dgstMap), + regDigest(t, "foo.txt", "a", dgstMap), + dir("test/"), + regDigest(t, "test/bar.txt", "bbb", dgstMap), + ) + }, + minChunkSize: []int{0, 64000}, + checks: []check{ + checkStargzTOC, + checkVerifyTOC, + checkVerifyInvalidStargzFail(buildTar(t, tarOf( + file("baz.txt", ""), + file("foo.txt", "M"), // modified + dir("test/"), + file("test/bar.txt", "bbb"), + ), allowedPrefix[0])), + // checkVerifyInvalidTOCEntryFail("foo.txt"), // TODO + checkVerifyBrokenContentFail("foo.txt"), + }, + }, + { + name: "big-files", + tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + return tarOf( + regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap), + regDigest(t, "foo.txt", "a", dgstMap), + dir("test/"), + regDigest(t, "test/bar.txt", "testbartestbar", dgstMap), + ) + }, + checks: []check{ + checkStargzTOC, + checkVerifyTOC, + checkVerifyInvalidStargzFail(buildTar(t, tarOf( + file("baz.txt", "bazbazbazMMMbazbazbaz"), // modified + file("foo.txt", "a"), + dir("test/"), + file("test/bar.txt", "testbartestbar"), + ), allowedPrefix[0])), + checkVerifyInvalidTOCEntryFail("test/bar.txt"), + checkVerifyBrokenContentFail("test/bar.txt"), + }, + }, + { + name: "with-non-regfiles", + minChunkSize: []int{0, 64000}, + tarInit: 
func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) {
+ return tarOf(
+ regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap),
+ regDigest(t, "foo.txt", "a", dgstMap),
+ regDigest(t, "bar/foo2.txt", "b", dgstMap),
+ regDigest(t, "foo3.txt", "c", dgstMap),
+ symlink("barlink", "test/bar.txt"),
+ dir("test/"),
+ regDigest(t, "test/bar.txt", "testbartestbar", dgstMap),
+ dir("test2/"),
+ link("test2/bazlink", "baz.txt"),
+ )
+ },
+ checks: []check{
+ checkStargzTOC,
+ checkVerifyTOC,
+ checkVerifyInvalidStargzFail(buildTar(t, tarOf(
+ file("baz.txt", "bazbazbazbazbazbazbaz"),
+ file("foo.txt", "a"),
+ file("bar/foo2.txt", "b"),
+ file("foo3.txt", "c"),
+ symlink("barlink", "test/bar.txt"),
+ dir("test/"),
+ file("test/bar.txt", "testbartestbar"),
+ dir("test2/"),
+ link("test2/bazlink", "foo.txt"), // modified
+ ), allowedPrefix[0])),
+ checkVerifyInvalidTOCEntryFail("test/bar.txt"),
+ checkVerifyBrokenContentFail("test/bar.txt"),
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ if len(tt.minChunkSize) == 0 {
+ tt.minChunkSize = []int{0}
+ }
+ for _, srcCompression := range srcCompressions {
+ srcCompression := srcCompression
+ for _, newCL := range controllers {
+ newCL := newCL
+ for _, prefix := range allowedPrefix {
+ prefix := prefix
+ for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
+ srcTarFormat := srcTarFormat
+ for _, minChunkSize := range tt.minChunkSize {
+ minChunkSize := minChunkSize
+ t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s,minChunkSize=%d", newCL(), prefix, srcTarFormat, minChunkSize), func(t *testing.T) {
+ // Get original tar file and chunk digests
+ dgstMap := make(map[string]digest.Digest)
+ tarBlob := buildTar(t, tt.tarInit(t, dgstMap), prefix, srcTarFormat)
+
+ cl := newCL()
+ rc, err := Build(compressBlob(t, tarBlob, srcCompression),
+ WithChunkSize(chunkSize), WithCompression(cl))
+ if err != nil {
+ t.Fatalf("failed to convert stargz: %v", err)
+ }
+ tocDigest := rc.TOCDigest()
+ defer rc.Close()
+ buf := new(bytes.Buffer)
+ if _, err := io.Copy(buf, rc); err != nil {
+ t.Fatalf("failed to copy built stargz blob: %v", err)
+ }
+ newStargz := buf.Bytes()
+ // NoPrefetchLandmark is added during `Build`, which is expected behaviour.
+ dgstMap[chunkID(NoPrefetchLandmark, 0, int64(len([]byte{landmarkContents})))] = digest.FromBytes([]byte{landmarkContents})
+
+ for _, check := range tt.checks {
+ check(t, newStargz, tocDigest, dgstMap, cl, newCL)
+ }
+ })
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+// checkStargzTOC checks that the TOC JSON of the passed stargz has the expected
+// digest and contains valid chunks. It walks all entries in the stargz and checks
+// that all chunk digests stored in the TOC JSON match the actual contents.
+func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { + sgz, err := Open( + io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), + WithDecompressors(controller), + ) + if err != nil { + t.Errorf("failed to parse converted stargz: %v", err) + return + } + digestMapTOC, err := listDigests(io.NewSectionReader( + bytes.NewReader(sgzData), 0, int64(len(sgzData))), + controller, + ) + if err != nil { + t.Fatalf("failed to list digest: %v", err) + } + found := make(map[string]bool) + for id := range dgstMap { + found[id] = false + } + zr, err := controller.Reader(bytes.NewReader(sgzData)) + if err != nil { + t.Fatalf("failed to decompress converted stargz: %v", err) + } + defer zr.Close() + tr := tar.NewReader(zr) + for { + h, err := tr.Next() + if err != nil { + if err != io.EOF { + t.Errorf("failed to read tar entry: %v", err) + return + } + break + } + if h.Name == TOCTarName { + // Check the digest of TOC JSON based on the actual contents + // It's sure that TOC JSON exists in this archive because + // Open succeeded. + dgstr := digest.Canonical.Digester() + if _, err := io.Copy(dgstr.Hash(), tr); err != nil { + t.Fatalf("failed to calculate digest of TOC JSON: %v", + err) + } + if dgstr.Digest() != tocDigest { + t.Errorf("invalid TOC JSON %q; want %q", tocDigest, dgstr.Digest()) + } + continue + } + if _, ok := sgz.Lookup(h.Name); !ok { + t.Errorf("lost stargz entry %q in the converted TOC", h.Name) + return + } + var n int64 + for n < h.Size { + ce, ok := sgz.ChunkEntryForOffset(h.Name, n) + if !ok { + t.Errorf("lost chunk %q(offset=%d) in the converted TOC", + h.Name, n) + return + } + + // Get the original digest to make sure the file contents are kept unchanged + // from the original tar, during the whole conversion steps. + id := chunkID(h.Name, n, ce.ChunkSize) + want, ok := dgstMap[id] + if !ok { + t.Errorf("Unexpected chunk %q(offset=%d,size=%d): %v", + h.Name, n, ce.ChunkSize, dgstMap) + return + } + found[id] = true + + // Check the file contents + dgstr := digest.Canonical.Digester() + if _, err := io.CopyN(dgstr.Hash(), tr, ce.ChunkSize); err != nil { + t.Fatalf("failed to calculate digest of %q (offset=%d,size=%d)", + h.Name, n, ce.ChunkSize) + } + if want != dgstr.Digest() { + t.Errorf("Invalid contents in converted stargz %q: %q; want %q", + h.Name, dgstr.Digest(), want) + return + } + + // Check the digest stored in TOC JSON + dgstTOC, ok := digestMapTOC[ce.Offset] + if !ok { + t.Errorf("digest of %q(offset=%d,size=%d,chunkOffset=%d) isn't registered", + h.Name, ce.Offset, ce.ChunkSize, ce.ChunkOffset) + } + if want != dgstTOC { + t.Errorf("Invalid digest in TOCEntry %q: %q; want %q", + h.Name, dgstTOC, want) + return + } + + n += ce.ChunkSize + } + } + + for id, ok := range found { + if !ok { + t.Errorf("required chunk %q not found in the converted stargz: %v", id, found) + } + } +} + +// checkVerifyTOC checks the verification works for the TOC JSON of the passed +// stargz. It walks all entries in the stargz and checks the verifications for +// all chunks work. 
+func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { + sgz, err := Open( + io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), + WithDecompressors(controller), + ) + if err != nil { + t.Errorf("failed to parse converted stargz: %v", err) + return + } + ev, err := sgz.VerifyTOC(tocDigest) + if err != nil { + t.Errorf("failed to verify stargz: %v", err) + return + } + + found := make(map[string]bool) + for id := range dgstMap { + found[id] = false + } + zr, err := controller.Reader(bytes.NewReader(sgzData)) + if err != nil { + t.Fatalf("failed to decompress converted stargz: %v", err) + } + defer zr.Close() + tr := tar.NewReader(zr) + for { + h, err := tr.Next() + if err != nil { + if err != io.EOF { + t.Errorf("failed to read tar entry: %v", err) + return + } + break + } + if h.Name == TOCTarName { + continue + } + if _, ok := sgz.Lookup(h.Name); !ok { + t.Errorf("lost stargz entry %q in the converted TOC", h.Name) + return + } + var n int64 + for n < h.Size { + ce, ok := sgz.ChunkEntryForOffset(h.Name, n) + if !ok { + t.Errorf("lost chunk %q(offset=%d) in the converted TOC", + h.Name, n) + return + } + + v, err := ev.Verifier(ce) + if err != nil { + t.Errorf("failed to get verifier for %q(offset=%d)", h.Name, n) + } + + found[chunkID(h.Name, n, ce.ChunkSize)] = true + + // Check the file contents + if _, err := io.CopyN(v, tr, ce.ChunkSize); err != nil { + t.Fatalf("failed to get chunk of %q (offset=%d,size=%d)", + h.Name, n, ce.ChunkSize) + } + if !v.Verified() { + t.Errorf("Invalid contents in converted stargz %q (should be succeeded)", + h.Name) + return + } + n += ce.ChunkSize + } + } + + for id, ok := range found { + if !ok { + t.Errorf("required chunk %q not found in the converted stargz: %v", id, found) + } + } +} + +// checkVerifyInvalidTOCEntryFail checks if misconfigured TOC JSON can be +// detected during the verification and the verification returns an error. 
+func checkVerifyInvalidTOCEntryFail(filename string) check { + return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { + funcs := map[string]rewriteFunc{ + "lost digest in a entry": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) { + var found bool + for _, e := range toc.Entries { + if cleanEntryName(e.Name) == filename { + if e.Type != "reg" && e.Type != "chunk" { + t.Fatalf("entry %q to break must be regfile or chunk", filename) + } + if e.ChunkDigest == "" { + t.Fatalf("entry %q is already invalid", filename) + } + e.ChunkDigest = "" + found = true + } + } + if !found { + t.Fatalf("rewrite target not found") + } + }, + "duplicated entry offset": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) { + var ( + sampleEntry *TOCEntry + targetEntry *TOCEntry + ) + for _, e := range toc.Entries { + if e.Type == "reg" || e.Type == "chunk" { + if cleanEntryName(e.Name) == filename { + targetEntry = e + } else { + sampleEntry = e + } + } + } + if sampleEntry == nil { + t.Fatalf("TOC must contain at least one regfile or chunk entry other than the rewrite target") + } + if targetEntry == nil { + t.Fatalf("rewrite target not found") + } + targetEntry.Offset = sampleEntry.Offset + }, + } + + for name, rFunc := range funcs { + t.Run(name, func(t *testing.T) { + newSgz, newTocDigest := rewriteTOCJSON(t, io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), rFunc, controller) + buf := new(bytes.Buffer) + if _, err := io.Copy(buf, newSgz); err != nil { + t.Fatalf("failed to get converted stargz") + } + isgz := buf.Bytes() + + sgz, err := Open( + io.NewSectionReader(bytes.NewReader(isgz), 0, int64(len(isgz))), + WithDecompressors(controller), + ) + if err != nil { + t.Fatalf("failed to parse converted stargz: %v", err) + return + } + _, err = sgz.VerifyTOC(newTocDigest) + if err == nil { + t.Errorf("must fail for invalid TOC") + return + } + }) + } + } +} + +// checkVerifyInvalidStargzFail checks if the verification detects that the +// given stargz file doesn't match to the expected digest and returns error. +func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check { + return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { + cl := newController() + rc, err := Build(invalid, WithChunkSize(chunkSize), WithCompression(cl)) + if err != nil { + t.Fatalf("failed to convert stargz: %v", err) + } + defer rc.Close() + buf := new(bytes.Buffer) + if _, err := io.Copy(buf, rc); err != nil { + t.Fatalf("failed to copy built stargz blob: %v", err) + } + mStargz := buf.Bytes() + + sgz, err := Open( + io.NewSectionReader(bytes.NewReader(mStargz), 0, int64(len(mStargz))), + WithDecompressors(cl), + ) + if err != nil { + t.Fatalf("failed to parse converted stargz: %v", err) + return + } + _, err = sgz.VerifyTOC(tocDigest) + if err == nil { + t.Errorf("must fail for invalid TOC") + return + } + } +} + +// checkVerifyBrokenContentFail checks if the verifier detects broken contents +// that doesn't match to the expected digest and returns error. 
+func checkVerifyBrokenContentFail(filename string) check { + return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { + // Parse stargz file + sgz, err := Open( + io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), + WithDecompressors(controller), + ) + if err != nil { + t.Fatalf("failed to parse converted stargz: %v", err) + return + } + ev, err := sgz.VerifyTOC(tocDigest) + if err != nil { + t.Fatalf("failed to verify stargz: %v", err) + return + } + + // Open the target file + sr, err := sgz.OpenFile(filename) + if err != nil { + t.Fatalf("failed to open file %q", filename) + } + ce, ok := sgz.ChunkEntryForOffset(filename, 0) + if !ok { + t.Fatalf("lost chunk %q(offset=%d) in the converted TOC", filename, 0) + return + } + if ce.ChunkSize == 0 { + t.Fatalf("file mustn't be empty") + return + } + data := make([]byte, ce.ChunkSize) + if _, err := sr.ReadAt(data, ce.ChunkOffset); err != nil { + t.Errorf("failed to get data of a chunk of %q(offset=%q)", + filename, ce.ChunkOffset) + } + + // Check the broken chunk (must fail) + v, err := ev.Verifier(ce) + if err != nil { + t.Fatalf("failed to get verifier for %q", filename) + } + broken := append([]byte{^data[0]}, data[1:]...) + if _, err := io.CopyN(v, bytes.NewReader(broken), ce.ChunkSize); err != nil { + t.Fatalf("failed to get chunk of %q (offset=%d,size=%d)", + filename, ce.ChunkOffset, ce.ChunkSize) + } + if v.Verified() { + t.Errorf("verification must fail for broken file chunk %q(org:%q,broken:%q)", + filename, data, broken) + } + } +} + +func chunkID(name string, offset, size int64) string { + return fmt.Sprintf("%s-%d-%d", cleanEntryName(name), offset, size) +} + +type rewriteFunc func(t *testing.T, toc *JTOC, sgz *io.SectionReader) + +func rewriteTOCJSON(t *testing.T, sgz *io.SectionReader, rewrite rewriteFunc, controller TestingController) (newSgz io.Reader, tocDigest digest.Digest) { + decodedJTOC, jtocOffset, err := parseStargz(sgz, controller) + if err != nil { + t.Fatalf("failed to extract TOC JSON: %v", err) + } + + rewrite(t, decodedJTOC, sgz) + + tocFooter, tocDigest, err := tocAndFooter(controller, decodedJTOC, jtocOffset) + if err != nil { + t.Fatalf("failed to create toc and footer: %v", err) + } + + // Reconstruct stargz file with the modified TOC JSON + if _, err := sgz.Seek(0, io.SeekStart); err != nil { + t.Fatalf("failed to reset the seek position of stargz: %v", err) + } + return io.MultiReader( + io.LimitReader(sgz, jtocOffset), // Original stargz (before TOC JSON) + tocFooter, // Rewritten TOC and footer + ), tocDigest +} + +func listDigests(sgz *io.SectionReader, controller TestingController) (map[int64]digest.Digest, error) { + decodedJTOC, _, err := parseStargz(sgz, controller) + if err != nil { + return nil, err + } + digestMap := make(map[int64]digest.Digest) + for _, e := range decodedJTOC.Entries { + if e.Type == "reg" || e.Type == "chunk" { + if e.Type == "reg" && e.Size == 0 { + continue // ignores empty file + } + if e.ChunkDigest == "" { + return nil, fmt.Errorf("ChunkDigest of %q(off=%d) not found in TOC JSON", + e.Name, e.Offset) + } + d, err := digest.Parse(e.ChunkDigest) + if err != nil { + return nil, err + } + digestMap[e.Offset] = d + } + } + return digestMap, nil +} + +func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJTOC *JTOC, jtocOffset int64, err error) { + fSize := controller.FooterSize() + footer := make([]byte, fSize) + if 
_, err := sgz.ReadAt(footer, sgz.Size()-fSize); err != nil { + return nil, 0, fmt.Errorf("error reading footer: %w", err) + } + _, tocOffset, _, err := controller.ParseFooter(footer[positive(int64(len(footer))-fSize):]) + if err != nil { + return nil, 0, fmt.Errorf("failed to parse footer: %w", err) + } + + // Decode the TOC JSON + var tocReader io.Reader + if tocOffset >= 0 { + tocReader = io.NewSectionReader(sgz, tocOffset, sgz.Size()-tocOffset-fSize) + } + decodedJTOC, _, err = controller.ParseTOC(tocReader) + if err != nil { + return nil, 0, fmt.Errorf("failed to parse TOC: %w", err) + } + return decodedJTOC, tocOffset, nil +} + +func testWriteAndOpen(t *testing.T, controllers ...TestingControllerFactory) { + const content = "Some contents" + invalidUtf8 := "\xff\xfe\xfd" + + xAttrFile := xAttr{"foo": "bar", "invalid-utf8": invalidUtf8} + sampleOwner := owner{uid: 50, gid: 100} + + data64KB := randomContents(64000) + + tests := []struct { + name string + chunkSize int + minChunkSize int + in []tarEntry + want []stargzCheck + wantNumGz int // expected number of streams + + wantNumGzLossLess int // expected number of streams (> 0) in lossless mode if it's different from wantNumGz + wantFailOnLossLess bool + wantTOCVersion int // default = 1 + }{ + { + name: "empty", + in: tarOf(), + wantNumGz: 2, // (empty tar) + TOC + footer + want: checks( + numTOCEntries(0), + ), + }, + { + name: "1dir_1empty_file", + in: tarOf( + dir("foo/"), + file("foo/bar.txt", ""), + ), + wantNumGz: 3, // dir, TOC, footer + want: checks( + numTOCEntries(2), + hasDir("foo/"), + hasFileLen("foo/bar.txt", 0), + entryHasChildren("foo", "bar.txt"), + hasFileDigest("foo/bar.txt", digestFor("")), + ), + }, + { + name: "1dir_1file", + in: tarOf( + dir("foo/"), + file("foo/bar.txt", content, xAttrFile), + ), + wantNumGz: 4, // var dir, foo.txt alone, TOC, footer + want: checks( + numTOCEntries(2), + hasDir("foo/"), + hasFileLen("foo/bar.txt", len(content)), + hasFileDigest("foo/bar.txt", digestFor(content)), + hasFileContentsRange("foo/bar.txt", 0, content), + hasFileContentsRange("foo/bar.txt", 1, content[1:]), + entryHasChildren("", "foo"), + entryHasChildren("foo", "bar.txt"), + hasFileXattrs("foo/bar.txt", "foo", "bar"), + hasFileXattrs("foo/bar.txt", "invalid-utf8", invalidUtf8), + ), + }, + { + name: "2meta_2file", + in: tarOf( + dir("bar/", sampleOwner), + dir("foo/", sampleOwner), + file("foo/bar.txt", content, sampleOwner), + ), + wantNumGz: 4, // both dirs, foo.txt alone, TOC, footer + want: checks( + numTOCEntries(3), + hasDir("bar/"), + hasDir("foo/"), + hasFileLen("foo/bar.txt", len(content)), + entryHasChildren("", "bar", "foo"), + entryHasChildren("foo", "bar.txt"), + hasChunkEntries("foo/bar.txt", 1), + hasEntryOwner("bar/", sampleOwner), + hasEntryOwner("foo/", sampleOwner), + hasEntryOwner("foo/bar.txt", sampleOwner), + ), + }, + { + name: "3dir", + in: tarOf( + dir("bar/"), + dir("foo/"), + dir("foo/bar/"), + ), + wantNumGz: 3, // 3 dirs, TOC, footer + want: checks( + hasDirLinkCount("bar/", 2), + hasDirLinkCount("foo/", 3), + hasDirLinkCount("foo/bar/", 2), + ), + }, + { + name: "symlink", + in: tarOf( + dir("foo/"), + symlink("foo/bar", "../../x"), + ), + wantNumGz: 3, // metas + TOC + footer + want: checks( + numTOCEntries(2), + hasSymlink("foo/bar", "../../x"), + entryHasChildren("", "foo"), + entryHasChildren("foo", "bar"), + ), + }, + { + name: "chunked_file", + chunkSize: 4, + in: tarOf( + dir("foo/"), + file("foo/big.txt", "This "+"is s"+"uch "+"a bi"+"g fi"+"le"), + ), + wantNumGz: 9, // dir + 
big.txt(6 chunks) + TOC + footer + want: checks( + numTOCEntries(7), // 1 for foo dir, 6 for the foo/big.txt file + hasDir("foo/"), + hasFileLen("foo/big.txt", len("This is such a big file")), + hasFileDigest("foo/big.txt", digestFor("This is such a big file")), + hasFileContentsRange("foo/big.txt", 0, "This is such a big file"), + hasFileContentsRange("foo/big.txt", 1, "his is such a big file"), + hasFileContentsRange("foo/big.txt", 2, "is is such a big file"), + hasFileContentsRange("foo/big.txt", 3, "s is such a big file"), + hasFileContentsRange("foo/big.txt", 4, " is such a big file"), + hasFileContentsRange("foo/big.txt", 5, "is such a big file"), + hasFileContentsRange("foo/big.txt", 6, "s such a big file"), + hasFileContentsRange("foo/big.txt", 7, " such a big file"), + hasFileContentsRange("foo/big.txt", 8, "such a big file"), + hasFileContentsRange("foo/big.txt", 9, "uch a big file"), + hasFileContentsRange("foo/big.txt", 10, "ch a big file"), + hasFileContentsRange("foo/big.txt", 11, "h a big file"), + hasFileContentsRange("foo/big.txt", 12, " a big file"), + hasFileContentsRange("foo/big.txt", len("This is such a big file")-1, ""), + hasChunkEntries("foo/big.txt", 6), + ), + }, + { + name: "recursive", + in: tarOf( + dir("/", sampleOwner), + dir("bar/", sampleOwner), + dir("foo/", sampleOwner), + file("foo/bar.txt", content, sampleOwner), + ), + wantNumGz: 4, // dirs, bar.txt alone, TOC, footer + want: checks( + maxDepth(2), // 0: root directory, 1: "foo/", 2: "bar.txt" + ), + }, + { + name: "block_char_fifo", + in: tarOf( + tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Name: prefix + "b", + Typeflag: tar.TypeBlock, + Devmajor: 123, + Devminor: 456, + Format: format, + }) + }), + tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Name: prefix + "c", + Typeflag: tar.TypeChar, + Devmajor: 111, + Devminor: 222, + Format: format, + }) + }), + tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Name: prefix + "f", + Typeflag: tar.TypeFifo, + Format: format, + }) + }), + ), + wantNumGz: 3, + want: checks( + lookupMatch("b", &TOCEntry{Name: "b", Type: "block", DevMajor: 123, DevMinor: 456, NumLink: 1}), + lookupMatch("c", &TOCEntry{Name: "c", Type: "char", DevMajor: 111, DevMinor: 222, NumLink: 1}), + lookupMatch("f", &TOCEntry{Name: "f", Type: "fifo", NumLink: 1}), + ), + }, + { + name: "modes", + in: tarOf( + dir("foo1/", 0755|os.ModeDir|os.ModeSetgid), + file("foo1/bar1", content, 0700|os.ModeSetuid), + file("foo1/bar2", content, 0755|os.ModeSetgid), + dir("foo2/", 0755|os.ModeDir|os.ModeSticky), + file("foo2/bar3", content, 0755|os.ModeSticky), + dir("foo3/", 0755|os.ModeDir), + file("foo3/bar4", content, os.FileMode(0700)), + file("foo3/bar5", content, os.FileMode(0755)), + ), + wantNumGz: 8, // dir, bar1 alone, bar2 alone + dir, bar3 alone + dir, bar4 alone, bar5 alone, TOC, footer + want: checks( + hasMode("foo1/", 0755|os.ModeDir|os.ModeSetgid), + hasMode("foo1/bar1", 0700|os.ModeSetuid), + hasMode("foo1/bar2", 0755|os.ModeSetgid), + hasMode("foo2/", 0755|os.ModeDir|os.ModeSticky), + hasMode("foo2/bar3", 0755|os.ModeSticky), + hasMode("foo3/", 0755|os.ModeDir), + hasMode("foo3/bar4", os.FileMode(0700)), + hasMode("foo3/bar5", os.FileMode(0755)), + ), + }, + { + name: "lossy", + in: tarOf( + dir("bar/", sampleOwner), + dir("foo/", sampleOwner), + file("foo/bar.txt", content, sampleOwner), + 
file(TOCTarName, "dummy"), // ignored by the writer. (lossless write returns error) + ), + wantNumGz: 4, // both dirs, foo.txt alone, TOC, footer + want: checks( + numTOCEntries(3), + hasDir("bar/"), + hasDir("foo/"), + hasFileLen("foo/bar.txt", len(content)), + entryHasChildren("", "bar", "foo"), + entryHasChildren("foo", "bar.txt"), + hasChunkEntries("foo/bar.txt", 1), + hasEntryOwner("bar/", sampleOwner), + hasEntryOwner("foo/", sampleOwner), + hasEntryOwner("foo/bar.txt", sampleOwner), + ), + wantFailOnLossLess: true, + }, + { + name: "hardlink should be replaced to the destination entry", + in: tarOf( + dir("foo/"), + file("foo/foo1", "test"), + link("foolink", "foo/foo1"), + ), + wantNumGz: 4, // dir, foo1 + link, TOC, footer + want: checks( + mustSameEntry("foo/foo1", "foolink"), + ), + }, + { + name: "several_files_in_chunk", + minChunkSize: 8000, + in: tarOf( + dir("foo/"), + file("foo/foo1", data64KB), + file("foo2", "bb"), + file("foo22", "ccc"), + dir("bar/"), + file("bar/bar.txt", "aaa"), + file("foo3", data64KB), + ), + // NOTE: we assume that the compressed "data64KB" is still larger than 8KB + wantNumGz: 4, // dir+foo1, foo2+foo22+dir+bar.txt+foo3, TOC, footer + want: checks( + numTOCEntries(7), // dir, foo1, foo2, foo22, dir, bar.txt, foo3 + hasDir("foo/"), + hasDir("bar/"), + hasFileLen("foo/foo1", len(data64KB)), + hasFileLen("foo2", len("bb")), + hasFileLen("foo22", len("ccc")), + hasFileLen("bar/bar.txt", len("aaa")), + hasFileLen("foo3", len(data64KB)), + hasFileDigest("foo/foo1", digestFor(data64KB)), + hasFileDigest("foo2", digestFor("bb")), + hasFileDigest("foo22", digestFor("ccc")), + hasFileDigest("bar/bar.txt", digestFor("aaa")), + hasFileDigest("foo3", digestFor(data64KB)), + hasFileContentsWithPreRead("foo22", 0, "ccc", chunkInfo{"foo2", "bb"}, chunkInfo{"bar/bar.txt", "aaa"}, chunkInfo{"foo3", data64KB}), + hasFileContentsRange("foo/foo1", 0, data64KB), + hasFileContentsRange("foo2", 0, "bb"), + hasFileContentsRange("foo2", 1, "b"), + hasFileContentsRange("foo22", 0, "ccc"), + hasFileContentsRange("foo22", 1, "cc"), + hasFileContentsRange("foo22", 2, "c"), + hasFileContentsRange("bar/bar.txt", 0, "aaa"), + hasFileContentsRange("bar/bar.txt", 1, "aa"), + hasFileContentsRange("bar/bar.txt", 2, "a"), + hasFileContentsRange("foo3", 0, data64KB), + hasFileContentsRange("foo3", 1, data64KB[1:]), + hasFileContentsRange("foo3", 2, data64KB[2:]), + hasFileContentsRange("foo3", len(data64KB)/2, data64KB[len(data64KB)/2:]), + hasFileContentsRange("foo3", len(data64KB)-1, data64KB[len(data64KB)-1:]), + ), + }, + { + name: "several_files_in_chunk_chunked", + minChunkSize: 8000, + chunkSize: 32000, + in: tarOf( + dir("foo/"), + file("foo/foo1", data64KB), + file("foo2", "bb"), + dir("bar/"), + file("foo3", data64KB), + ), + // NOTE: we assume that the compressed chunk of "data64KB" is still larger than 8KB + wantNumGz: 6, // dir+foo1(1), foo1(2), foo2+dir+foo3(1), foo3(2), TOC, footer + want: checks( + numTOCEntries(7), // dir, foo1(2 chunks), foo2, dir, foo3(2 chunks) + hasDir("foo/"), + hasDir("bar/"), + hasFileLen("foo/foo1", len(data64KB)), + hasFileLen("foo2", len("bb")), + hasFileLen("foo3", len(data64KB)), + hasFileDigest("foo/foo1", digestFor(data64KB)), + hasFileDigest("foo2", digestFor("bb")), + hasFileDigest("foo3", digestFor(data64KB)), + hasFileContentsWithPreRead("foo2", 0, "bb", chunkInfo{"foo3", data64KB[:32000]}), + hasFileContentsRange("foo/foo1", 0, data64KB), + hasFileContentsRange("foo/foo1", 1, data64KB[1:]), + hasFileContentsRange("foo/foo1", 2, 
data64KB[2:]), + hasFileContentsRange("foo/foo1", len(data64KB)/2, data64KB[len(data64KB)/2:]), + hasFileContentsRange("foo/foo1", len(data64KB)-1, data64KB[len(data64KB)-1:]), + hasFileContentsRange("foo2", 0, "bb"), + hasFileContentsRange("foo2", 1, "b"), + hasFileContentsRange("foo3", 0, data64KB), + hasFileContentsRange("foo3", 1, data64KB[1:]), + hasFileContentsRange("foo3", 2, data64KB[2:]), + hasFileContentsRange("foo3", len(data64KB)/2, data64KB[len(data64KB)/2:]), + hasFileContentsRange("foo3", len(data64KB)-1, data64KB[len(data64KB)-1:]), + ), + }, + } + + for _, tt := range tests { + for _, newCL := range controllers { + newCL := newCL + for _, prefix := range allowedPrefix { + prefix := prefix + for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} { + srcTarFormat := srcTarFormat + for _, lossless := range []bool{true, false} { + t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", newCL(), prefix, lossless, srcTarFormat), func(t *testing.T) { + var tr io.Reader = buildTar(t, tt.in, prefix, srcTarFormat) + origTarDgstr := digest.Canonical.Digester() + tr = io.TeeReader(tr, origTarDgstr.Hash()) + var stargzBuf bytes.Buffer + cl1 := newCL() + w := NewWriterWithCompressor(&stargzBuf, cl1) + w.ChunkSize = tt.chunkSize + w.MinChunkSize = tt.minChunkSize + if lossless { + err := w.AppendTarLossLess(tr) + if tt.wantFailOnLossLess { + if err != nil { + return // expected to fail + } + t.Fatalf("Append wanted to fail on lossless") + } + if err != nil { + t.Fatalf("Append(lossless): %v", err) + } + } else { + if err := w.AppendTar(tr); err != nil { + t.Fatalf("Append: %v", err) + } + } + if _, err := w.Close(); err != nil { + t.Fatalf("Writer.Close: %v", err) + } + b := stargzBuf.Bytes() + + if lossless { + // Check if the result blob reserves original tar metadata + rc, err := Unpack(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), cl1) + if err != nil { + t.Errorf("failed to decompress blob: %v", err) + return + } + defer rc.Close() + resultDgstr := digest.Canonical.Digester() + if _, err := io.Copy(resultDgstr.Hash(), rc); err != nil { + t.Errorf("failed to read result decompressed blob: %v", err) + return + } + if resultDgstr.Digest() != origTarDgstr.Digest() { + t.Errorf("lossy compression occurred: digest=%v; want %v", + resultDgstr.Digest(), origTarDgstr.Digest()) + return + } + } + + diffID := w.DiffID() + wantDiffID := cl1.DiffIDOf(t, b) + if diffID != wantDiffID { + t.Errorf("DiffID = %q; want %q", diffID, wantDiffID) + } + + telemetry, checkCalled := newCalledTelemetry() + sr := io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))) + r, err := Open( + sr, + WithDecompressors(cl1), + WithTelemetry(telemetry), + ) + if err != nil { + t.Fatalf("stargz.Open: %v", err) + } + wantTOCVersion := 1 + if tt.wantTOCVersion > 0 { + wantTOCVersion = tt.wantTOCVersion + } + if r.toc.Version != wantTOCVersion { + t.Fatalf("invalid TOC Version %d; wanted %d", r.toc.Version, wantTOCVersion) + } + + footerSize := cl1.FooterSize() + footerOffset := sr.Size() - footerSize + footer := make([]byte, footerSize) + if _, err := sr.ReadAt(footer, footerOffset); err != nil { + t.Errorf("failed to read footer: %v", err) + } + _, tocOffset, _, err := cl1.ParseFooter(footer) + if err != nil { + t.Errorf("failed to parse footer: %v", err) + } + if err := checkCalled(tocOffset >= 0); err != nil { + t.Errorf("telemetry failure: %v", err) + } + + wantNumGz := tt.wantNumGz + if lossless && tt.wantNumGzLossLess > 0 { + wantNumGz = 
tt.wantNumGzLossLess + } + streamOffsets := []int64{0} + prevOffset := int64(-1) + streams := 0 + for _, e := range r.toc.Entries { + if e.Offset > prevOffset { + streamOffsets = append(streamOffsets, e.Offset) + prevOffset = e.Offset + streams++ + } + } + streams++ // TOC + if tocOffset >= 0 { + // toc is in the blob + streamOffsets = append(streamOffsets, tocOffset) + } + streams++ // footer + streamOffsets = append(streamOffsets, footerOffset) + if streams != wantNumGz { + t.Errorf("number of streams in TOC = %d; want %d", streams, wantNumGz) + } + + t.Logf("testing streams: %+v", streamOffsets) + cl1.TestStreams(t, b, streamOffsets) + + for _, want := range tt.want { + want.check(t, r) + } + }) + } + } + } + } + } +} + +type chunkInfo struct { + name string + data string +} + +func newCalledTelemetry() (telemetry *Telemetry, check func(needsGetTOC bool) error) { + var getFooterLatencyCalled bool + var getTocLatencyCalled bool + var deserializeTocLatencyCalled bool + return &Telemetry{ + func(time.Time) { getFooterLatencyCalled = true }, + func(time.Time) { getTocLatencyCalled = true }, + func(time.Time) { deserializeTocLatencyCalled = true }, + }, func(needsGetTOC bool) error { + var allErr []error + if !getFooterLatencyCalled { + allErr = append(allErr, fmt.Errorf("metrics GetFooterLatency isn't called")) + } + if needsGetTOC { + if !getTocLatencyCalled { + allErr = append(allErr, fmt.Errorf("metrics GetTocLatency isn't called")) + } + } + if !deserializeTocLatencyCalled { + allErr = append(allErr, fmt.Errorf("metrics DeserializeTocLatency isn't called")) + } + return errorutil.Aggregate(allErr) + } +} + +func digestFor(content string) string { + sum := sha256.Sum256([]byte(content)) + return fmt.Sprintf("sha256:%x", sum) +} + +type numTOCEntries int + +func (n numTOCEntries) check(t *testing.T, r *Reader) { + if r.toc == nil { + t.Fatal("nil TOC") + } + if got, want := len(r.toc.Entries), int(n); got != want { + t.Errorf("got %d TOC entries; want %d", got, want) + } + t.Logf("got TOC entries:") + for i, ent := range r.toc.Entries { + entj, _ := json.Marshal(ent) + t.Logf(" [%d]: %s\n", i, entj) + } + if t.Failed() { + t.FailNow() + } +} + +func checks(s ...stargzCheck) []stargzCheck { return s } + +type stargzCheck interface { + check(t *testing.T, r *Reader) +} + +type stargzCheckFn func(*testing.T, *Reader) + +func (f stargzCheckFn) check(t *testing.T, r *Reader) { f(t, r) } + +func maxDepth(max int) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + e, ok := r.Lookup("") + if !ok { + t.Fatal("root directory not found") + } + d, err := getMaxDepth(t, e, 0, 10*max) + if err != nil { + t.Errorf("failed to get max depth (wanted %d): %v", max, err) + return + } + if d != max { + t.Errorf("invalid depth %d; want %d", d, max) + return + } + }) +} + +func getMaxDepth(t *testing.T, e *TOCEntry, current, limit int) (max int, rErr error) { + if current > limit { + return -1, fmt.Errorf("walkMaxDepth: exceeds limit: current:%d > limit:%d", + current, limit) + } + max = current + e.ForeachChild(func(baseName string, ent *TOCEntry) bool { + t.Logf("%q(basename:%q) is child of %q\n", ent.Name, baseName, e.Name) + d, err := getMaxDepth(t, ent, current+1, limit) + if err != nil { + rErr = err + return false + } + if d > max { + max = d + } + return true + }) + return +} + +func hasFileLen(file string, wantLen int) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == file { + if ent.Type != "reg" { + 
t.Errorf("file type of %q is %q; want \"reg\"", file, ent.Type) + } else if ent.Size != int64(wantLen) { + t.Errorf("file size of %q = %d; want %d", file, ent.Size, wantLen) + } + return + } + } + t.Errorf("file %q not found", file) + }) +} + +func hasFileXattrs(file, name, value string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == file { + if ent.Type != "reg" { + t.Errorf("file type of %q is %q; want \"reg\"", file, ent.Type) + } + if ent.Xattrs == nil { + t.Errorf("file %q has no xattrs", file) + return + } + valueFound, found := ent.Xattrs[name] + if !found { + t.Errorf("file %q has no xattr %q", file, name) + return + } + if string(valueFound) != value { + t.Errorf("file %q has xattr %q with value %q instead of %q", file, name, valueFound, value) + } + + return + } + } + t.Errorf("file %q not found", file) + }) +} + +func hasFileDigest(file string, digest string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + ent, ok := r.Lookup(file) + if !ok { + t.Fatalf("didn't find TOCEntry for file %q", file) + } + if ent.Digest != digest { + t.Fatalf("Digest(%q) = %q, want %q", file, ent.Digest, digest) + } + }) +} + +func hasFileContentsWithPreRead(file string, offset int, want string, extra ...chunkInfo) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + extraMap := make(map[string]chunkInfo) + for _, e := range extra { + extraMap[e.name] = e + } + var extraNames []string + for n := range extraMap { + extraNames = append(extraNames, n) + } + f, err := r.OpenFileWithPreReader(file, func(e *TOCEntry, cr io.Reader) error { + t.Logf("On %q: got preread of %q", file, e.Name) + ex, ok := extraMap[e.Name] + if !ok { + t.Fatalf("fail on %q: unexpected entry %q: %+v, %+v", file, e.Name, e, extraNames) + } + got, err := io.ReadAll(cr) + if err != nil { + t.Fatalf("fail on %q: failed to read %q: %v", file, e.Name, err) + } + if ex.data != string(got) { + t.Fatalf("fail on %q: unexpected contents of %q: len=%d; want=%d", file, e.Name, len(got), len(ex.data)) + } + delete(extraMap, e.Name) + return nil + }) + if err != nil { + t.Fatal(err) + } + got := make([]byte, len(want)) + n, err := f.ReadAt(got, int64(offset)) + if err != nil { + t.Fatalf("ReadAt(len %d, offset %d, size %d) = %v, %v", len(got), offset, f.Size(), n, err) + } + if string(got) != want { + t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, viewContent(got), viewContent([]byte(want))) + } + if len(extraMap) != 0 { + var exNames []string + for _, ex := range extraMap { + exNames = append(exNames, ex.name) + } + t.Fatalf("fail on %q: some entries aren't read: %+v", file, exNames) + } + }) +} + +func hasFileContentsRange(file string, offset int, want string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + f, err := r.OpenFile(file) + if err != nil { + t.Fatal(err) + } + got := make([]byte, len(want)) + n, err := f.ReadAt(got, int64(offset)) + if err != nil { + t.Fatalf("ReadAt(len %d, offset %d) = %v, %v", len(got), offset, n, err) + } + if string(got) != want { + t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, viewContent(got), viewContent([]byte(want))) + } + }) +} + +func hasChunkEntries(file string, wantChunks int) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + ent, ok := r.Lookup(file) + if !ok { + t.Fatalf("no file for %q", file) + } + if ent.Type != "reg" { + t.Fatalf("file %q has unexpected type %q; want reg", file, ent.Type) 
+ } + chunks := r.getChunks(ent) + if len(chunks) != wantChunks { + t.Errorf("len(r.getChunks(%q)) = %d; want %d", file, len(chunks), wantChunks) + return + } + f := chunks[0] + + var gotChunks []*TOCEntry + var last *TOCEntry + for off := int64(0); off < f.Size; off++ { + e, ok := r.ChunkEntryForOffset(file, off) + if !ok { + t.Errorf("no ChunkEntryForOffset at %d", off) + return + } + if last != e { + gotChunks = append(gotChunks, e) + last = e + } + } + if !reflect.DeepEqual(chunks, gotChunks) { + t.Errorf("gotChunks=%d, want=%d; contents mismatch", len(gotChunks), wantChunks) + } + + // And verify the NextOffset + for i := 0; i < len(gotChunks)-1; i++ { + ci := gotChunks[i] + cnext := gotChunks[i+1] + if ci.NextOffset() != cnext.Offset { + t.Errorf("chunk %d NextOffset %d != next chunk's Offset of %d", i, ci.NextOffset(), cnext.Offset) + } + } + }) +} + +func entryHasChildren(dir string, want ...string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + want := append([]string(nil), want...) + var got []string + ent, ok := r.Lookup(dir) + if !ok { + t.Fatalf("didn't find TOCEntry for dir node %q", dir) + } + for baseName := range ent.children { + got = append(got, baseName) + } + sort.Strings(got) + sort.Strings(want) + if !reflect.DeepEqual(got, want) { + t.Errorf("children of %q = %q; want %q", dir, got, want) + } + }) +} + +func hasDir(file string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == cleanEntryName(file) { + if ent.Type != "dir" { + t.Errorf("file type of %q is %q; want \"dir\"", file, ent.Type) + } + return + } + } + t.Errorf("directory %q not found", file) + }) +} + +func hasDirLinkCount(file string, count int) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == cleanEntryName(file) { + if ent.Type != "dir" { + t.Errorf("file type of %q is %q; want \"dir\"", file, ent.Type) + return + } + if ent.NumLink != count { + t.Errorf("link count of %q = %d; want %d", file, ent.NumLink, count) + } + return + } + } + t.Errorf("directory %q not found", file) + }) +} + +func hasMode(file string, mode os.FileMode) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == cleanEntryName(file) { + if ent.Stat().Mode() != mode { + t.Errorf("invalid mode: got %v; want %v", ent.Stat().Mode(), mode) + return + } + return + } + } + t.Errorf("file %q not found", file) + }) +} + +func hasSymlink(file, target string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == file { + if ent.Type != "symlink" { + t.Errorf("file type of %q is %q; want \"symlink\"", file, ent.Type) + } else if ent.LinkName != target { + t.Errorf("link target of symlink %q is %q; want %q", file, ent.LinkName, target) + } + return + } + } + t.Errorf("symlink %q not found", file) + }) +} + +func lookupMatch(name string, want *TOCEntry) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + e, ok := r.Lookup(name) + if !ok { + t.Fatalf("failed to Lookup entry %q", name) + } + if !reflect.DeepEqual(e, want) { + t.Errorf("entry %q mismatch.\n got: %+v\nwant: %+v\n", name, e, want) + } + + }) +} + +func hasEntryOwner(entry string, owner owner) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + ent, ok := r.Lookup(strings.TrimSuffix(entry, "/")) + if !ok { + t.Errorf("entry %q not found", 
entry) + return + } + if ent.UID != owner.uid || ent.GID != owner.gid { + t.Errorf("entry %q has invalid owner (uid:%d, gid:%d) instead of (uid:%d, gid:%d)", entry, ent.UID, ent.GID, owner.uid, owner.gid) + return + } + }) +} + +func mustSameEntry(files ...string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + var first *TOCEntry + for _, f := range files { + if first == nil { + var ok bool + first, ok = r.Lookup(f) + if !ok { + t.Errorf("unknown first file on Lookup: %q", f) + return + } + } + + // Test Lookup + e, ok := r.Lookup(f) + if !ok { + t.Errorf("unknown file on Lookup: %q", f) + return + } + if e != first { + t.Errorf("Lookup: %+v(%p) != %+v(%p)", e, e, first, first) + return + } + + // Test LookupChild + pe, ok := r.Lookup(filepath.Dir(filepath.Clean(f))) + if !ok { + t.Errorf("failed to get parent of %q", f) + return + } + e, ok = pe.LookupChild(filepath.Base(filepath.Clean(f))) + if !ok { + t.Errorf("failed to get %q as the child of %+v", f, pe) + return + } + if e != first { + t.Errorf("LookupChild: %+v(%p) != %+v(%p)", e, e, first, first) + return + } + + // Test ForeachChild + pe.ForeachChild(func(baseName string, e *TOCEntry) bool { + if baseName == filepath.Base(filepath.Clean(f)) { + if e != first { + t.Errorf("ForeachChild: %+v(%p) != %+v(%p)", e, e, first, first) + return false + } + } + return true + }) + } + }) +} + +func viewContent(c []byte) string { + if len(c) < 100 { + return string(c) + } + return string(c[:50]) + "...(omit)..." + string(c[50:100]) +} + +func tarOf(s ...tarEntry) []tarEntry { return s } + +type tarEntry interface { + appendTar(tw *tar.Writer, prefix string, format tar.Format) error +} + +type tarEntryFunc func(*tar.Writer, string, tar.Format) error + +func (f tarEntryFunc) appendTar(tw *tar.Writer, prefix string, format tar.Format) error { + return f(tw, prefix, format) +} + +func buildTar(t *testing.T, ents []tarEntry, prefix string, opts ...interface{}) *io.SectionReader { + format := tar.FormatUnknown + for _, opt := range opts { + switch v := opt.(type) { + case tar.Format: + format = v + default: + panic(fmt.Errorf("unsupported opt for buildTar: %v", opt)) + } + } + buf := new(bytes.Buffer) + tw := tar.NewWriter(buf) + for _, ent := range ents { + if err := ent.appendTar(tw, prefix, format); err != nil { + t.Fatalf("building input tar: %v", err) + } + } + if err := tw.Close(); err != nil { + t.Errorf("closing write of input tar: %v", err) + } + data := append(buf.Bytes(), make([]byte, 100)...) // append empty bytes at the tail to see lossless works + return io.NewSectionReader(bytes.NewReader(data), 0, int64(len(data))) +} + +func dir(name string, opts ...interface{}) tarEntry { + return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error { + var o owner + mode := os.FileMode(0755) + for _, opt := range opts { + switch v := opt.(type) { + case owner: + o = v + case os.FileMode: + mode = v + default: + return errors.New("unsupported opt") + } + } + if !strings.HasSuffix(name, "/") { + panic(fmt.Sprintf("missing trailing slash in dir %q ", name)) + } + tm, err := fileModeToTarMode(mode) + if err != nil { + return err + } + return tw.WriteHeader(&tar.Header{ + Typeflag: tar.TypeDir, + Name: prefix + name, + Mode: tm, + Uid: o.uid, + Gid: o.gid, + Format: format, + }) + }) +} + +// xAttr are extended attributes to set on test files created with the file func. +type xAttr map[string]string + +// owner is owner ot set on test files and directories with the file and dir functions. 
+type owner struct { + uid int + gid int +} + +func file(name, contents string, opts ...interface{}) tarEntry { + return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error { + var xattrs xAttr + var o owner + mode := os.FileMode(0644) + for _, opt := range opts { + switch v := opt.(type) { + case xAttr: + xattrs = v + case owner: + o = v + case os.FileMode: + mode = v + default: + return errors.New("unsupported opt") + } + } + if strings.HasSuffix(name, "/") { + return fmt.Errorf("bogus trailing slash in file %q", name) + } + tm, err := fileModeToTarMode(mode) + if err != nil { + return err + } + if len(xattrs) > 0 { + format = tar.FormatPAX // only PAX supports xattrs + } + if err := tw.WriteHeader(&tar.Header{ + Typeflag: tar.TypeReg, + Name: prefix + name, + Mode: tm, + Xattrs: xattrs, + Size: int64(len(contents)), + Uid: o.uid, + Gid: o.gid, + Format: format, + }); err != nil { + return err + } + _, err = io.WriteString(tw, contents) + return err + }) +} + +func symlink(name, target string) tarEntry { + return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error { + return tw.WriteHeader(&tar.Header{ + Typeflag: tar.TypeSymlink, + Name: prefix + name, + Linkname: target, + Mode: 0644, + Format: format, + }) + }) +} + +func link(name string, linkname string) tarEntry { + now := time.Now() + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeLink, + Name: prefix + name, + Linkname: linkname, + ModTime: now, + Format: format, + }) + }) +} + +func chardev(name string, major, minor int64) tarEntry { + now := time.Now() + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeChar, + Name: prefix + name, + Devmajor: major, + Devminor: minor, + ModTime: now, + Format: format, + }) + }) +} + +func blockdev(name string, major, minor int64) tarEntry { + now := time.Now() + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeBlock, + Name: prefix + name, + Devmajor: major, + Devminor: minor, + ModTime: now, + Format: format, + }) + }) +} +func fifo(name string) tarEntry { + now := time.Now() + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeFifo, + Name: prefix + name, + ModTime: now, + Format: format, + }) + }) +} + +func prefetchLandmark() tarEntry { + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + if err := w.WriteHeader(&tar.Header{ + Name: PrefetchLandmark, + Typeflag: tar.TypeReg, + Size: int64(len([]byte{landmarkContents})), + Format: format, + }); err != nil { + return err + } + contents := []byte{landmarkContents} + if _, err := io.CopyN(w, bytes.NewReader(contents), int64(len(contents))); err != nil { + return err + } + return nil + }) +} + +func noPrefetchLandmark() tarEntry { + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + if err := w.WriteHeader(&tar.Header{ + Name: NoPrefetchLandmark, + Typeflag: tar.TypeReg, + Size: int64(len([]byte{landmarkContents})), + Format: format, + }); err != nil { + return err + } + contents := []byte{landmarkContents} + if _, err := io.CopyN(w, bytes.NewReader(contents), int64(len(contents))); err != nil { + return err + } + return nil + }) +} + +func regDigest(t *testing.T, name string, contentStr 
string, digestMap map[string]digest.Digest) tarEntry { + if digestMap == nil { + t.Fatalf("digest map mustn't be nil") + } + content := []byte(contentStr) + + var n int64 + for n < int64(len(content)) { + size := int64(chunkSize) + remain := int64(len(content)) - n + if remain < size { + size = remain + } + dgstr := digest.Canonical.Digester() + if _, err := io.CopyN(dgstr.Hash(), bytes.NewReader(content[n:n+size]), size); err != nil { + t.Fatalf("failed to calculate digest of %q (name=%q,offset=%d,size=%d)", + string(content[n:n+size]), name, n, size) + } + digestMap[chunkID(name, n, size)] = dgstr.Digest() + n += size + } + + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + if err := w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeReg, + Name: prefix + name, + Size: int64(len(content)), + Format: format, + }); err != nil { + return err + } + if _, err := io.CopyN(w, bytes.NewReader(content), int64(len(content))); err != nil { + return err + } + return nil + }) +} + +var runes = []rune("1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + +func randomContents(n int) string { + b := make([]rune, n) + for i := range b { + b[i] = runes[rand.Intn(len(runes))] + } + return string(b) +} + +func fileModeToTarMode(mode os.FileMode) (int64, error) { + h, err := tar.FileInfoHeader(fileInfoOnlyMode(mode), "") + if err != nil { + return 0, err + } + return h.Mode, nil +} + +// fileInfoOnlyMode is os.FileMode that populates only file mode. +type fileInfoOnlyMode os.FileMode + +func (f fileInfoOnlyMode) Name() string { return "" } +func (f fileInfoOnlyMode) Size() int64 { return 0 } +func (f fileInfoOnlyMode) Mode() os.FileMode { return os.FileMode(f) } +func (f fileInfoOnlyMode) ModTime() time.Time { return time.Now() } +func (f fileInfoOnlyMode) IsDir() bool { return os.FileMode(f).IsDir() } +func (f fileInfoOnlyMode) Sys() interface{} { return nil } + +func CheckGzipHasStreams(t *testing.T, b []byte, streams []int64) { + if len(streams) == 0 { + return // nop + } + + wants := map[int64]struct{}{} + for _, s := range streams { + wants[s] = struct{}{} + } + + len0 := len(b) + br := bytes.NewReader(b) + zr := new(gzip.Reader) + t.Logf("got gzip streams:") + numStreams := 0 + for { + zoff := len0 - br.Len() + if err := zr.Reset(br); err != nil { + if err == io.EOF { + return + } + t.Fatalf("countStreams(gzip), Reset: %v", err) + } + zr.Multistream(false) + n, err := io.Copy(io.Discard, zr) + if err != nil { + t.Fatalf("countStreams(gzip), Copy: %v", err) + } + var extra string + if len(zr.Header.Extra) > 0 { + extra = fmt.Sprintf("; extra=%q", zr.Header.Extra) + } + t.Logf(" [%d] at %d in stargz, uncompressed length %d%s", numStreams, zoff, n, extra) + delete(wants, int64(zoff)) + numStreams++ + } +} + +func GzipDiffIDOf(t *testing.T, b []byte) string { + h := sha256.New() + zr, err := gzip.NewReader(bytes.NewReader(b)) + if err != nil { + t.Fatalf("diffIDOf(gzip): %v", err) + } + defer zr.Close() + if _, err := io.Copy(h, zr); err != nil { + t.Fatalf("diffIDOf(gzip).Copy: %v", err) + } + return fmt.Sprintf("sha256:%x", h.Sum(nil)) +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go new file mode 100644 index 00000000..57e0aa61 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go @@ -0,0 +1,342 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Copyright 2019 The Go Authors. All rights reserved. + Use of this source code is governed by a BSD-style + license that can be found in the LICENSE file. +*/ + +package estargz + +import ( + "archive/tar" + "hash" + "io" + "os" + "path" + "time" + + digest "github.com/opencontainers/go-digest" +) + +const ( + // TOCTarName is the name of the JSON file in the tar archive in the + // table of contents gzip stream. + TOCTarName = "stargz.index.json" + + // FooterSize is the number of bytes in the footer + // + // The footer is an empty gzip stream with no compression and an Extra + // header of the form "%016xSTARGZ", where the 64 bit hex-encoded + // number is the offset to the gzip stream of JSON TOC. + // + // 51 comes from: + // + // 10 bytes gzip header + // 2 bytes XLEN (length of Extra field) = 26 (4 bytes header + 16 hex digits + len("STARGZ")) + // 2 bytes Extra: SI1 = 'S', SI2 = 'G' + // 2 bytes Extra: LEN = 22 (16 hex digits + len("STARGZ")) + // 22 bytes Extra: subfield = fmt.Sprintf("%016xSTARGZ", offsetOfTOC) + // 5 bytes flate header + // 8 bytes gzip footer + // (End of the eStargz blob) + // + // NOTE: For Extra fields, subfield IDs SI1='S' SI2='G' is used for eStargz. + FooterSize = 51 + + // legacyFooterSize is the number of bytes in the legacy stargz footer. + // + // 47 comes from: + // + // 10 byte gzip header + + // 2 byte (LE16) length of extra, encoding 22 (16 hex digits + len("STARGZ")) == "\x16\x00" + + // 22 bytes of extra (fmt.Sprintf("%016xSTARGZ", tocGzipOffset)) + // 5 byte flate header + // 8 byte gzip footer (two little endian uint32s: digest, size) + legacyFooterSize = 47 + + // TOCJSONDigestAnnotation is an annotation for an image layer. This stores the + // digest of the TOC JSON. + // This annotation is valid only when it is specified in `.[]layers.annotations` + // of an image manifest. + TOCJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest" + + // StoreUncompressedSizeAnnotation is an additional annotation key for eStargz to enable lazy + // pulling on containers/storage. Stargz Store is required to expose the layer's uncompressed size + // to the runtime but current OCI image doesn't ship this information by default. So we store this + // to the special annotation. + StoreUncompressedSizeAnnotation = "io.containers.estargz.uncompressed-size" + + // PrefetchLandmark is a file entry which indicates the end position of + // prefetch in the stargz file. + PrefetchLandmark = ".prefetch.landmark" + + // NoPrefetchLandmark is a file entry which indicates that no prefetch should + // occur in the stargz file. + NoPrefetchLandmark = ".no.prefetch.landmark" + + landmarkContents = 0xf +) + +// JTOC is the JSON-serialized table of contents index of the files in the stargz file. +type JTOC struct { + Version int `json:"version"` + Entries []*TOCEntry `json:"entries"` +} + +// TOCEntry is an entry in the stargz file's TOC (Table of Contents). +type TOCEntry struct { + // Name is the tar entry's name. 
It is the complete path + // stored in the tar file, not just the base name. + Name string `json:"name"` + + // Type is one of "dir", "reg", "symlink", "hardlink", "char", + // "block", "fifo", or "chunk". + // The "chunk" type is used for regular file data chunks past the first + // TOCEntry; the 2nd chunk and on have only Type ("chunk"), Offset, + // ChunkOffset, and ChunkSize populated. + Type string `json:"type"` + + // Size, for regular files, is the logical size of the file. + Size int64 `json:"size,omitempty"` + + // ModTime3339 is the modification time of the tar entry. Empty + // means zero or unknown. Otherwise it's in UTC RFC3339 + // format. Use the ModTime method to access the time.Time value. + ModTime3339 string `json:"modtime,omitempty"` + modTime time.Time + + // LinkName, for symlinks and hardlinks, is the link target. + LinkName string `json:"linkName,omitempty"` + + // Mode is the permission and mode bits. + Mode int64 `json:"mode,omitempty"` + + // UID is the user ID of the owner. + UID int `json:"uid,omitempty"` + + // GID is the group ID of the owner. + GID int `json:"gid,omitempty"` + + // Uname is the username of the owner. + // + // In the serialized JSON, this field may only be present for + // the first entry with the same UID. + Uname string `json:"userName,omitempty"` + + // Gname is the group name of the owner. + // + // In the serialized JSON, this field may only be present for + // the first entry with the same GID. + Gname string `json:"groupName,omitempty"` + + // Offset, for regular files, provides the offset in the + // stargz file to the file's data bytes. See ChunkOffset and + // ChunkSize. + Offset int64 `json:"offset,omitempty"` + + // InnerOffset is an optional field indicates uncompressed offset + // of this "reg" or "chunk" payload in a stream starts from Offset. + // This field enables to put multiple "reg" or "chunk" payloads + // in one chunk with having the same Offset but different InnerOffset. + InnerOffset int64 `json:"innerOffset,omitempty"` + + nextOffset int64 // the Offset of the next entry with a non-zero Offset + + // DevMajor is the major device number for "char" and "block" types. + DevMajor int `json:"devMajor,omitempty"` + + // DevMinor is the major device number for "char" and "block" types. + DevMinor int `json:"devMinor,omitempty"` + + // NumLink is the number of entry names pointing to this entry. + // Zero means one name references this entry. + // This field is calculated during runtime and not recorded in TOC JSON. + NumLink int `json:"-"` + + // Xattrs are the extended attribute for the entry. + Xattrs map[string][]byte `json:"xattrs,omitempty"` + + // Digest stores the OCI checksum for regular files payload. + // It has the form "sha256:abcdef01234....". + Digest string `json:"digest,omitempty"` + + // ChunkOffset is non-zero if this is a chunk of a large, + // regular file. If so, the Offset is where the gzip header of + // ChunkSize bytes at ChunkOffset in Name begin. + // + // In serialized form, a "chunkSize" JSON field of zero means + // that the chunk goes to the end of the file. After reading + // from the stargz TOC, though, the ChunkSize is initialized + // to a non-zero file for when Type is either "reg" or + // "chunk". + ChunkOffset int64 `json:"chunkOffset,omitempty"` + ChunkSize int64 `json:"chunkSize,omitempty"` + + // ChunkDigest stores an OCI digest of the chunk. This must be formed + // as "sha256:0123abcd...". 
+ ChunkDigest string `json:"chunkDigest,omitempty"` + + children map[string]*TOCEntry + + // chunkTopIndex is index of the entry where Offset starts in the blob. + chunkTopIndex int +} + +// ModTime returns the entry's modification time. +func (e *TOCEntry) ModTime() time.Time { return e.modTime } + +// NextOffset returns the position (relative to the start of the +// stargz file) of the next gzip boundary after e.Offset. +func (e *TOCEntry) NextOffset() int64 { return e.nextOffset } + +func (e *TOCEntry) addChild(baseName string, child *TOCEntry) { + if e.children == nil { + e.children = make(map[string]*TOCEntry) + } + if child.Type == "dir" { + e.NumLink++ // Entry ".." in the subdirectory links to this directory + } + e.children[baseName] = child +} + +// isDataType reports whether TOCEntry is a regular file or chunk (something that +// contains regular file data). +func (e *TOCEntry) isDataType() bool { return e.Type == "reg" || e.Type == "chunk" } + +// Stat returns a FileInfo value representing e. +func (e *TOCEntry) Stat() os.FileInfo { return fileInfo{e} } + +// ForeachChild calls f for each child item. If f returns false, iteration ends. +// If e is not a directory, f is not called. +func (e *TOCEntry) ForeachChild(f func(baseName string, ent *TOCEntry) bool) { + for name, ent := range e.children { + if !f(name, ent) { + return + } + } +} + +// LookupChild returns the directory e's child by its base name. +func (e *TOCEntry) LookupChild(baseName string) (child *TOCEntry, ok bool) { + child, ok = e.children[baseName] + return +} + +// fileInfo implements os.FileInfo using the wrapped *TOCEntry. +type fileInfo struct{ e *TOCEntry } + +var _ os.FileInfo = fileInfo{} + +func (fi fileInfo) Name() string { return path.Base(fi.e.Name) } +func (fi fileInfo) IsDir() bool { return fi.e.Type == "dir" } +func (fi fileInfo) Size() int64 { return fi.e.Size } +func (fi fileInfo) ModTime() time.Time { return fi.e.ModTime() } +func (fi fileInfo) Sys() interface{} { return fi.e } +func (fi fileInfo) Mode() (m os.FileMode) { + // TOCEntry.Mode is tar.Header.Mode so we can understand the these bits using `tar` pkg. + m = (&tar.Header{Mode: fi.e.Mode}).FileInfo().Mode() & + (os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky) + switch fi.e.Type { + case "dir": + m |= os.ModeDir + case "symlink": + m |= os.ModeSymlink + case "char": + m |= os.ModeDevice | os.ModeCharDevice + case "block": + m |= os.ModeDevice + case "fifo": + m |= os.ModeNamedPipe + } + return m +} + +// TOCEntryVerifier holds verifiers that are usable for verifying chunks contained +// in a eStargz blob. +type TOCEntryVerifier interface { + + // Verifier provides a content verifier that can be used for verifying the + // contents of the specified TOCEntry. + Verifier(ce *TOCEntry) (digest.Verifier, error) +} + +// Compression provides the compression helper to be used creating and parsing eStargz. +// This package provides gzip-based Compression by default, but any compression +// algorithm (e.g. zstd) can be used as long as it implements Compression. +type Compression interface { + Compressor + Decompressor +} + +// Compressor represents the helper mothods to be used for creating eStargz. +type Compressor interface { + // Writer returns WriteCloser to be used for writing a chunk to eStargz. + // Everytime a chunk is written, the WriteCloser is closed and Writer is + // called again for writing the next chunk. 
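+ //
+ // A minimal usage sketch (illustrative only; "comp" is assumed to be some
+ // Compression implementation and "dst" any io.Writer):
+ //
+ //	w, _ := comp.Writer(dst) // open a stream for the first chunk
+ //	w.Write(chunk1)
+ //	w.Close()                // each chunk's writer is closed before the next
+ //	w, _ = comp.Writer(dst)  // Writer is called again for the following chunk
+ //	w.Write(chunk2)
+ //	w.Close()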
+ // + // The returned writer should implement "Flush() error" function that flushes + // any pending compressed data to the underlying writer. + Writer(w io.Writer) (WriteFlushCloser, error) + + // WriteTOCAndFooter is called to write JTOC to the passed Writer. + // diffHash calculates the DiffID (uncompressed sha256 hash) of the blob + // WriteTOCAndFooter can optionally write anything that affects DiffID calculation + // (e.g. uncompressed TOC JSON). + // + // This function returns tocDgst that represents the digest of TOC that will be used + // to verify this blob when it's parsed. + WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (tocDgst digest.Digest, err error) +} + +// Decompressor represents the helper mothods to be used for parsing eStargz. +type Decompressor interface { + // Reader returns ReadCloser to be used for decompressing file payload. + Reader(r io.Reader) (io.ReadCloser, error) + + // FooterSize returns the size of the footer of this blob. + FooterSize() int64 + + // ParseFooter parses the footer and returns the offset and (compressed) size of TOC. + // payloadBlobSize is the (compressed) size of the blob payload (i.e. the size between + // the top until the TOC JSON). + // + // If tocOffset < 0, we assume that TOC isn't contained in the blob and pass nil reader + // to ParseTOC. We expect that ParseTOC acquire TOC from the external location and return it. + // + // tocSize is optional. If tocSize <= 0, it's by default the size of the range from tocOffset until the beginning of the + // footer (blob size - tocOff - FooterSize). + // If blobPayloadSize < 0, blobPayloadSize become the blob size. + ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) + + // ParseTOC parses TOC from the passed reader. The reader provides the partial contents + // of the underlying blob that has the range specified by ParseFooter method. + // + // This function returns tocDgst that represents the digest of TOC that will be used + // to verify this blob. This must match to the value returned from + // Compressor.WriteTOCAndFooter that is used when creating this blob. + // + // If tocOffset returned by ParseFooter is < 0, we assume that TOC isn't contained in the blob. + // Pass nil reader to ParseTOC then we expect that ParseTOC acquire TOC from the external location + // and return it. + ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) +} + +type WriteFlushCloser interface { + io.WriteCloser + Flush() error +} diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file.go b/vendor/github.com/docker/cli/cli/config/configfile/file.go index 609a88c2..5db7f8b8 100644 --- a/vendor/github.com/docker/cli/cli/config/configfile/file.go +++ b/vendor/github.com/docker/cli/cli/config/configfile/file.go @@ -37,7 +37,6 @@ type ConfigFile struct { PruneFilters []string `json:"pruneFilters,omitempty"` Proxies map[string]ProxyConfig `json:"proxies,omitempty"` Experimental string `json:"experimental,omitempty"` - StackOrchestrator string `json:"stackOrchestrator,omitempty"` // Deprecated: swarm is now the default orchestrator, and this option is ignored. 
CurrentContext string `json:"currentContext,omitempty"` CLIPluginsExtraDirs []string `json:"cliPluginsExtraDirs,omitempty"` Plugins map[string]map[string]string `json:"plugins,omitempty"` diff --git a/vendor/github.com/docker/cli/cli/connhelper/commandconn/commandconn.go b/vendor/github.com/docker/cli/cli/connhelper/commandconn/commandconn.go index a0b035c9..202ddb84 100644 --- a/vendor/github.com/docker/cli/cli/connhelper/commandconn/commandconn.go +++ b/vendor/github.com/docker/cli/cli/connhelper/commandconn/commandconn.go @@ -32,7 +32,7 @@ import ( ) // New returns net.Conn -func New(ctx context.Context, cmd string, args ...string) (net.Conn, error) { +func New(_ context.Context, cmd string, args ...string) (net.Conn, error) { var ( c commandConn err error diff --git a/vendor/github.com/docker/cli/cli/streams/in.go b/vendor/github.com/docker/cli/cli/streams/in.go index 44b0de38..1a10f7c8 100644 --- a/vendor/github.com/docker/cli/cli/streams/in.go +++ b/vendor/github.com/docker/cli/cli/streams/in.go @@ -9,38 +9,42 @@ import ( "github.com/moby/term" ) -// In is an input stream used by the DockerCli to read user input +// In is an input stream to read user input. It implements [io.ReadCloser] +// with additional utilities, such as putting the terminal in raw mode. type In struct { commonStream in io.ReadCloser } +// Read implements the [io.Reader] interface. func (i *In) Read(p []byte) (int, error) { return i.in.Read(p) } -// Close implements the Closer interface +// Close implements the [io.Closer] interface. func (i *In) Close() error { return i.in.Close() } -// SetRawTerminal sets raw mode on the input terminal +// SetRawTerminal sets raw mode on the input terminal. It is a no-op if In +// is not a TTY, or if the "NORAW" environment variable is set to a non-empty +// value. func (i *In) SetRawTerminal() (err error) { - if os.Getenv("NORAW") != "" || !i.commonStream.isTerminal { + if !i.isTerminal || os.Getenv("NORAW") != "" { return nil } - i.commonStream.state, err = term.SetRawTerminal(i.commonStream.fd) + i.state, err = term.SetRawTerminal(i.fd) return err } -// CheckTty checks if we are trying to attach to a container tty -// from a non-tty client input stream, and if so, returns an error. +// CheckTty checks if we are trying to attach to a container TTY +// from a non-TTY client input stream, and if so, returns an error. func (i *In) CheckTty(attachStdin, ttyMode bool) error { // In order to attach to a container tty, input stream for the client must // be a tty itself: redirecting or piping the client standard input is // incompatible with `docker run -t`, `docker exec -t` or `docker attach`. if ttyMode && attachStdin && !i.isTerminal { - eText := "the input device is not a TTY" + const eText = "the input device is not a TTY" if runtime.GOOS == "windows" { return errors.New(eText + ". If you are using mintty, try prefixing the command with 'winpty'") } @@ -49,8 +53,9 @@ func (i *In) CheckTty(attachStdin, ttyMode bool) error { return nil } -// NewIn returns a new In object from a ReadCloser +// NewIn returns a new [In] from an [io.ReadCloser]. 
func NewIn(in io.ReadCloser) *In { - fd, isTerminal := term.GetFdInfo(in) - return &In{commonStream: commonStream{fd: fd, isTerminal: isTerminal}, in: in} + i := &In{in: in} + i.fd, i.isTerminal = term.GetFdInfo(in) + return i } diff --git a/vendor/github.com/docker/cli/cli/streams/out.go b/vendor/github.com/docker/cli/cli/streams/out.go index 95e21464..2383b085 100644 --- a/vendor/github.com/docker/cli/cli/streams/out.go +++ b/vendor/github.com/docker/cli/cli/streams/out.go @@ -8,8 +8,9 @@ import ( "github.com/sirupsen/logrus" ) -// Out is an output stream used by the DockerCli to write normal program -// output. +// Out is an output stream to write normal program output. It implements +// an [io.Writer], with additional utilities for detecting whether a terminal +// is connected, getting the TTY size, and putting the terminal in raw mode. type Out struct { commonStream out io.Writer @@ -19,23 +20,29 @@ func (o *Out) Write(p []byte) (int, error) { return o.out.Write(p) } -// SetRawTerminal sets raw mode on the input terminal +// SetRawTerminal puts the output of the terminal connected to the stream +// into raw mode. +// +// On UNIX, this does nothing. On Windows, it disables LF -> CRLF/ translation. +// It is a no-op if Out is not a TTY, or if the "NORAW" environment variable is +// set to a non-empty value. func (o *Out) SetRawTerminal() (err error) { - if os.Getenv("NORAW") != "" || !o.commonStream.isTerminal { + if !o.isTerminal || os.Getenv("NORAW") != "" { return nil } - o.commonStream.state, err = term.SetRawTerminalOutput(o.commonStream.fd) + o.state, err = term.SetRawTerminalOutput(o.fd) return err } -// GetTtySize returns the height and width in characters of the tty -func (o *Out) GetTtySize() (uint, uint) { +// GetTtySize returns the height and width in characters of the TTY, or +// zero for both if no TTY is connected. +func (o *Out) GetTtySize() (height uint, width uint) { if !o.isTerminal { return 0, 0 } ws, err := term.GetWinsize(o.fd) if err != nil { - logrus.Debugf("Error getting size: %s", err) + logrus.WithError(err).Debug("Error getting TTY size") if ws == nil { return 0, 0 } @@ -43,8 +50,9 @@ func (o *Out) GetTtySize() (uint, uint) { return uint(ws.Height), uint(ws.Width) } -// NewOut returns a new Out object from a Writer +// NewOut returns a new [Out] from an [io.Writer]. func NewOut(out io.Writer) *Out { - fd, isTerminal := term.GetFdInfo(out) - return &Out{commonStream: commonStream{fd: fd, isTerminal: isTerminal}, out: out} + o := &Out{out: out} + o.fd, o.isTerminal = term.GetFdInfo(out) + return o } diff --git a/vendor/github.com/docker/cli/cli/streams/stream.go b/vendor/github.com/docker/cli/cli/streams/stream.go index 21f0e452..54c9cda0 100644 --- a/vendor/github.com/docker/cli/cli/streams/stream.go +++ b/vendor/github.com/docker/cli/cli/streams/stream.go @@ -4,31 +4,32 @@ import ( "github.com/moby/term" ) -// commonStream is an input stream used by the DockerCli to read user input type commonStream struct { fd uintptr isTerminal bool state *term.State } -// FD returns the file descriptor number for this stream +// FD returns the file descriptor number for this stream. func (s *commonStream) FD() uintptr { return s.fd } -// IsTerminal returns true if this stream is connected to a terminal +// IsTerminal returns true if this stream is connected to a terminal. func (s *commonStream) IsTerminal() bool { return s.isTerminal } -// RestoreTerminal restores normal mode to the terminal +// RestoreTerminal restores normal mode to the terminal. 
func (s *commonStream) RestoreTerminal() { if s.state != nil { - term.RestoreTerminal(s.fd, s.state) + _ = term.RestoreTerminal(s.fd, s.state) } } -// SetIsTerminal sets the boolean used for isTerminal +// SetIsTerminal overrides whether a terminal is connected. It is used to +// override this property in unit-tests, and should not be depended on for +// other purposes. func (s *commonStream) SetIsTerminal(isTerminal bool) { s.isTerminal = isTerminal } diff --git a/vendor/github.com/docker/distribution/reference/reference.go b/vendor/github.com/docker/distribution/reference/reference.go index 8c0c23b2..b7cd00b0 100644 --- a/vendor/github.com/docker/distribution/reference/reference.go +++ b/vendor/github.com/docker/distribution/reference/reference.go @@ -3,13 +3,13 @@ // // Grammar // -// reference := name [ ":" tag ] [ "@" digest ] +// reference := name [ ":" tag ] [ "@" digest ] // name := [domain '/'] path-component ['/' path-component]* // domain := domain-component ['.' domain-component]* [':' port-number] // domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ // port-number := /[0-9]+/ // path-component := alpha-numeric [separator alpha-numeric]* -// alpha-numeric := /[a-z0-9]+/ +// alpha-numeric := /[a-z0-9]+/ // separator := /[_.]|__|[-]*/ // // tag := /[\w][\w.-]{0,127}/ diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS index 0728bfe1..b3141819 100644 --- a/vendor/github.com/docker/docker/AUTHORS +++ b/vendor/github.com/docker/docker/AUTHORS @@ -29,6 +29,7 @@ Adam Pointer Adam Singer Adam Walz Adam Williams +AdamKorcz Addam Hardy Aditi Rajagopal Aditya @@ -81,6 +82,7 @@ Alex Goodman Alex Nordlund Alex Olshansky Alex Samorukov +Alex Stockinger Alex Warhawk Alexander Artemenko Alexander Boyd @@ -198,6 +200,7 @@ Anusha Ragunathan Anyu Wang apocas Arash Deshmeh +arcosx ArikaChen Arko Dasgupta Arnaud Lefebvre @@ -241,6 +244,7 @@ Benjamin Atkin Benjamin Baker Benjamin Boudreau Benjamin Böhmke +Benjamin Wang Benjamin Yolken Benny Ng Benoit Chesneau @@ -634,6 +638,7 @@ Eng Zer Jun Enguerran Eohyung Lee epeterso +er0k Eric Barch Eric Curtin Eric G. Noriega @@ -754,6 +759,7 @@ Félix Baylac-Jacqué Félix Cantournet Gabe Rosenhouse Gabor Nagy +Gabriel Adrian Samfira Gabriel Goller Gabriel L. Somlo Gabriel Linder @@ -855,6 +861,7 @@ Hongbin Lu Hongxu Jia Honza Pokorny Hsing-Hui Hsu +Hsing-Yu (David) Chen hsinko <21551195@zju.edu.cn> Hu Keping Hu Tao @@ -887,6 +894,7 @@ Igor Dolzhikov Igor Karpovich Iliana Weller Ilkka Laukkanen +Illia Antypenko Illo Abdulrahim Ilya Dmitrichenko Ilya Gusev @@ -938,6 +946,7 @@ Jamie Hannaford Jamshid Afshar Jan Breig Jan Chren +Jan Garcia Jan Götte Jan Keromnes Jan Koprowski @@ -1206,6 +1215,7 @@ Kimbro Staken Kir Kolyshkin Kiran Gangadharan Kirill SIbirev +Kirk Easterson knappe Kohei Tsuruta Koichi Shiraishi @@ -1240,10 +1250,12 @@ Lars Kellogg-Stedman Lars R. 
Damerow Lars-Magnus Skog Laszlo Meszaros +Laura Brehm Laura Frank Laurent Bernaille Laurent Erignoux Laurie Voss +Leandro Motta Barros Leandro Siqueira Lee Calcote Lee Chao <932819864@qq.com> @@ -1563,6 +1575,7 @@ Nick Neisen Nick Parker Nick Payne Nick Russo +Nick Santos Nick Stenning Nick Stinemates Nick Wood @@ -1584,6 +1597,7 @@ NikolaMandic Nikolas Garofil Nikolay Edigaryev Nikolay Milovanov +ningmingxiao Nirmal Mehta Nishant Totla NIWA Hideyuki @@ -1615,6 +1629,7 @@ Omri Shiv Onur Filiz Oriol Francès Oscar Bonilla <6f6231@gmail.com> +oscar.chen <2972789494@qq.com> Oskar Niburski Otto Kekäläinen Ouyang Liduo @@ -1822,6 +1837,7 @@ Rory Hunter Rory McCune Ross Boucher Rovanion Luckey +Roy Reznik Royce Remer Rozhnov Alexandr Rudolph Gottesheim @@ -2271,6 +2287,7 @@ Xiaoyu Zhang xichengliudui <1693291525@qq.com> xiekeyang Ximo Guanter Gonzálbez +xin.li Xinbo Weng Xinfeng Liu Xinzi Zhou @@ -2282,6 +2299,7 @@ Yahya yalpul YAMADA Tsuyoshi Yamasaki Masahide +Yamazaki Masashi Yan Feng Yan Zhu Yang Bai diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go index 7df039b4..ded1c7c8 100644 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go @@ -64,13 +64,14 @@ func stick(f string) error { // GetDataHome returns XDG_DATA_HOME. // GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set. +// If HOME and XDG_DATA_HOME are not set, getpwent(3) is consulted to determine the users home directory. // // See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html func GetDataHome() (string, error) { if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" { return xdgDataHome, nil } - home := os.Getenv("HOME") + home := Get() if home == "" { return "", errors.New("could not get either XDG_DATA_HOME or HOME") } @@ -79,13 +80,14 @@ func GetDataHome() (string, error) { // GetConfigHome returns XDG_CONFIG_HOME. // GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set. +// If HOME and XDG_CONFIG_HOME are not set, getpwent(3) is consulted to determine the users home directory. // // See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html func GetConfigHome() (string, error) { if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" { return xdgConfigHome, nil } - home := os.Getenv("HOME") + home := Get() if home == "" { return "", errors.New("could not get either XDG_CONFIG_HOME or HOME") } @@ -93,8 +95,9 @@ func GetConfigHome() (string, error) { } // GetLibHome returns $HOME/.local/lib +// If HOME is not set, getpwent(3) is consulted to determine the users home directory. func GetLibHome() (string, error) { - home := os.Getenv("HOME") + home := Get() if home == "" { return "", errors.New("could not get HOME") } diff --git a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md index 74a37815..352018e7 100644 --- a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md +++ b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md @@ -1,10 +1,26 @@ # Change history of go-restful -## [v3.9.0] - 20221-07-21 +## [v3.10.2] - 2023-03-09 + +- introduced MergePathStrategy to be able to revert behaviour of path concatenation to 3.9.0 + see comment in Readme how to customize this behaviour. 
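+- a minimal sketch of restoring the pre-3.10 concatenation via the new package-level variables (illustrative, assuming the `restful` import alias; see route_builder.go in this release):
+
+```go
+package main
+
+import restful "github.com/emicklei/go-restful/v3"
+
+func init() {
+	// Revert from the stricter path.Join-based default back to the old
+	// trim-and-join behaviour of v3.9 and earlier.
+	restful.MergePathStrategy = restful.TrimSlashStrategy
+}
+
+func main() { /* register WebServices as usual */ }
+```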
+ +## [v3.10.1] - 2022-11-19 + +- fix broken 3.10.0 by using path package for joining paths + +## [v3.10.0] - 2022-10-11 - BROKEN + +- changed tokenizer to match std route match behavior; do not trimright the path (#511) +- Add MIME_ZIP (#512) +- Add MIME_ZIP and HEADER_ContentDisposition (#513) +- Changed how to get query parameter issue #510 + +## [v3.9.0] - 2022-07-21 - add support for http.Handler implementations to work as FilterFunction, issue #504 (thanks to https://github.com/ggicci) -## [v3.8.0] - 20221-06-06 +## [v3.8.0] - 2022-06-06 - use exact matching of allowed domain entries, issue #489 (#493) - this changes fixes [security] Authorization Bypass Through User-Controlled Key diff --git a/vendor/github.com/emicklei/go-restful/v3/README.md b/vendor/github.com/emicklei/go-restful/v3/README.md index 0625359d..85da9012 100644 --- a/vendor/github.com/emicklei/go-restful/v3/README.md +++ b/vendor/github.com/emicklei/go-restful/v3/README.md @@ -96,6 +96,10 @@ There are several hooks to customize the behavior of the go-restful package. - Compression - Encoders for other serializers - Use [jsoniter](https://github.com/json-iterator/go) by building this package using a build tag, e.g. `go build -tags=jsoniter .` +- Use the variable `MergePathStrategy` to change the behaviour of composing the Route path given a root path and a local route path + - versions >= 3.10.1 has set the value to `PathJoinStrategy` that fixes a reported [security issue](https://github.com/advisories/GHSA-r48q-9g5r-8q2h) but may cause your services not to work correctly anymore. + - versions <= 3.9 had the behaviour that can be restored in newer versions by setting the value to `TrimSlashStrategy`. + - you can set value to a custom implementation (must implement MergePathStrategyFunc) ## Resources diff --git a/vendor/github.com/emicklei/go-restful/v3/constants.go b/vendor/github.com/emicklei/go-restful/v3/constants.go index 203439c5..2328bde6 100644 --- a/vendor/github.com/emicklei/go-restful/v3/constants.go +++ b/vendor/github.com/emicklei/go-restful/v3/constants.go @@ -7,12 +7,14 @@ package restful const ( MIME_XML = "application/xml" // Accept or Content-Type used in Consumes() and/or Produces() MIME_JSON = "application/json" // Accept or Content-Type used in Consumes() and/or Produces() + MIME_ZIP = "application/zip" // Accept or Content-Type used in Consumes() and/or Produces() MIME_OCTET = "application/octet-stream" // If Content-Type is not present in request, use the default HEADER_Allow = "Allow" HEADER_Accept = "Accept" HEADER_Origin = "Origin" HEADER_ContentType = "Content-Type" + HEADER_ContentDisposition = "Content-Disposition" HEADER_LastModified = "Last-Modified" HEADER_AcceptEncoding = "Accept-Encoding" HEADER_ContentEncoding = "Content-Encoding" diff --git a/vendor/github.com/emicklei/go-restful/v3/request.go b/vendor/github.com/emicklei/go-restful/v3/request.go index 5725a075..0020095e 100644 --- a/vendor/github.com/emicklei/go-restful/v3/request.go +++ b/vendor/github.com/emicklei/go-restful/v3/request.go @@ -31,7 +31,8 @@ func NewRequest(httpRequest *http.Request) *Request { // a "Unable to unmarshal content of type:" response is returned. 
// Valid values are restful.MIME_JSON and restful.MIME_XML // Example: -// restful.DefaultRequestContentType(restful.MIME_JSON) +// +// restful.DefaultRequestContentType(restful.MIME_JSON) func DefaultRequestContentType(mime string) { defaultRequestContentType = mime } @@ -48,7 +49,7 @@ func (r *Request) PathParameters() map[string]string { // QueryParameter returns the (first) Query parameter value by its name func (r *Request) QueryParameter(name string) string { - return r.Request.FormValue(name) + return r.Request.URL.Query().Get(name) } // QueryParameters returns the all the query parameters values by name diff --git a/vendor/github.com/emicklei/go-restful/v3/response.go b/vendor/github.com/emicklei/go-restful/v3/response.go index 8f0b56aa..a41a92cc 100644 --- a/vendor/github.com/emicklei/go-restful/v3/response.go +++ b/vendor/github.com/emicklei/go-restful/v3/response.go @@ -109,6 +109,9 @@ func (r *Response) EntityWriter() (EntityReaderWriter, bool) { if DefaultResponseMimeType == MIME_XML { return entityAccessRegistry.accessorAt(MIME_XML) } + if DefaultResponseMimeType == MIME_ZIP { + return entityAccessRegistry.accessorAt(MIME_ZIP) + } // Fallback to whatever the route says it can produce. // https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html for _, each := range r.routeProduces { diff --git a/vendor/github.com/emicklei/go-restful/v3/route.go b/vendor/github.com/emicklei/go-restful/v3/route.go index 193f4a6b..ea05b3da 100644 --- a/vendor/github.com/emicklei/go-restful/v3/route.go +++ b/vendor/github.com/emicklei/go-restful/v3/route.go @@ -164,7 +164,7 @@ func tokenizePath(path string) []string { if "/" == path { return nil } - return strings.Split(strings.Trim(path, "/"), "/") + return strings.Split(strings.TrimLeft(path, "/"), "/") } // for debugging @@ -176,3 +176,5 @@ func (r *Route) String() string { func (r *Route) EnableContentEncoding(enabled bool) { r.contentEncodingEnabled = &enabled } + +var TrimRightSlashEnabled = false diff --git a/vendor/github.com/emicklei/go-restful/v3/route_builder.go b/vendor/github.com/emicklei/go-restful/v3/route_builder.go index 23641b6d..827f471d 100644 --- a/vendor/github.com/emicklei/go-restful/v3/route_builder.go +++ b/vendor/github.com/emicklei/go-restful/v3/route_builder.go @@ -7,6 +7,7 @@ package restful import ( "fmt" "os" + "path" "reflect" "runtime" "strings" @@ -46,11 +47,12 @@ type RouteBuilder struct { // Do evaluates each argument with the RouteBuilder itself. // This allows you to follow DRY principles without breaking the fluent programming style. 
// Example: -// ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500)) // -// func Returns500(b *RouteBuilder) { -// b.Returns(500, "Internal Server Error", restful.ServiceError{}) -// } +// ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500)) +// +// func Returns500(b *RouteBuilder) { +// b.Returns(500, "Internal Server Error", restful.ServiceError{}) +// } func (b *RouteBuilder) Do(oneArgBlocks ...func(*RouteBuilder)) *RouteBuilder { for _, each := range oneArgBlocks { each(b) @@ -351,8 +353,28 @@ func (b *RouteBuilder) Build() Route { return route } -func concatPath(path1, path2 string) string { - return strings.TrimRight(path1, "/") + "/" + strings.TrimLeft(path2, "/") +type MergePathStrategyFunc func(rootPath, routePath string) string + +var ( + // behavior >= 3.10 + PathJoinStrategy = func(rootPath, routePath string) string { + return path.Join(rootPath, routePath) + } + + // behavior <= 3.9 + TrimSlashStrategy = func(rootPath, routePath string) string { + return strings.TrimRight(rootPath, "/") + "/" + strings.TrimLeft(routePath, "/") + } + + // MergePathStrategy is the active strategy for merging a Route path when building the routing of all WebServices. + // The value is set to PathJoinStrategy + // PathJoinStrategy is a strategy that is more strict [Security - PRISMA-2022-0227] + MergePathStrategy = PathJoinStrategy +) + +// merge two paths using the current (package global) merge path strategy. +func concatPath(rootPath, routePath string) string { + return MergePathStrategy(rootPath, routePath) } var anonymousFuncCount int32 diff --git a/vendor/github.com/fatih/color/LICENSE.md b/vendor/github.com/fatih/color/LICENSE.md deleted file mode 100644 index 25fdaf63..00000000 --- a/vendor/github.com/fatih/color/LICENSE.md +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Fatih Arslan - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/fatih/color/README.md b/vendor/github.com/fatih/color/README.md deleted file mode 100644 index 5152bf59..00000000 --- a/vendor/github.com/fatih/color/README.md +++ /dev/null @@ -1,178 +0,0 @@ -# color [![](https://github.com/fatih/color/workflows/build/badge.svg)](https://github.com/fatih/color/actions) [![PkgGoDev](https://pkg.go.dev/badge/github.com/fatih/color)](https://pkg.go.dev/github.com/fatih/color) - -Color lets you use colorized outputs in terms of [ANSI Escape -Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It -has support for Windows too! 
The API can be used in several ways, pick one that -suits you. - -![Color](https://user-images.githubusercontent.com/438920/96832689-03b3e000-13f4-11eb-9803-46f4c4de3406.jpg) - - -## Install - -```bash -go get github.com/fatih/color -``` - -## Examples - -### Standard colors - -```go -// Print with default helper functions -color.Cyan("Prints text in cyan.") - -// A newline will be appended automatically -color.Blue("Prints %s in blue.", "text") - -// These are using the default foreground colors -color.Red("We have red") -color.Magenta("And many others ..") - -``` - -### Mix and reuse colors - -```go -// Create a new color object -c := color.New(color.FgCyan).Add(color.Underline) -c.Println("Prints cyan text with an underline.") - -// Or just add them to New() -d := color.New(color.FgCyan, color.Bold) -d.Printf("This prints bold cyan %s\n", "too!.") - -// Mix up foreground and background colors, create new mixes! -red := color.New(color.FgRed) - -boldRed := red.Add(color.Bold) -boldRed.Println("This will print text in bold red.") - -whiteBackground := red.Add(color.BgWhite) -whiteBackground.Println("Red text with white background.") -``` - -### Use your own output (io.Writer) - -```go -// Use your own io.Writer output -color.New(color.FgBlue).Fprintln(myWriter, "blue color!") - -blue := color.New(color.FgBlue) -blue.Fprint(writer, "This will print text in blue.") -``` - -### Custom print functions (PrintFunc) - -```go -// Create a custom print function for convenience -red := color.New(color.FgRed).PrintfFunc() -red("Warning") -red("Error: %s", err) - -// Mix up multiple attributes -notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() -notice("Don't forget this...") -``` - -### Custom fprint functions (FprintFunc) - -```go -blue := color.New(color.FgBlue).FprintfFunc() -blue(myWriter, "important notice: %s", stars) - -// Mix up with multiple attributes -success := color.New(color.Bold, color.FgGreen).FprintlnFunc() -success(myWriter, "Don't forget this...") -``` - -### Insert into noncolor strings (SprintFunc) - -```go -// Create SprintXxx functions to mix strings with other non-colorized strings: -yellow := color.New(color.FgYellow).SprintFunc() -red := color.New(color.FgRed).SprintFunc() -fmt.Printf("This is a %s and this is %s.\n", yellow("warning"), red("error")) - -info := color.New(color.FgWhite, color.BgGreen).SprintFunc() -fmt.Printf("This %s rocks!\n", info("package")) - -// Use helper functions -fmt.Println("This", color.RedString("warning"), "should be not neglected.") -fmt.Printf("%v %v\n", color.GreenString("Info:"), "an important message.") - -// Windows supported too! Just don't forget to change the output to color.Output -fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) -``` - -### Plug into existing code - -```go -// Use handy standard colors -color.Set(color.FgYellow) - -fmt.Println("Existing text will now be in yellow") -fmt.Printf("This one %s\n", "too") - -color.Unset() // Don't forget to unset - -// You can mix up parameters -color.Set(color.FgMagenta, color.Bold) -defer color.Unset() // Use it in your function - -fmt.Println("All text will now be bold magenta.") -``` - -### Disable/Enable color - -There might be a case where you want to explicitly disable/enable color output. the -`go-isatty` package will automatically disable color output for non-tty output streams -(for example if the output were piped directly to `less`). 
- -The `color` package also disables color output if the [`NO_COLOR`](https://no-color.org) environment -variable is set (regardless of its value). - -`Color` has support to disable/enable colors programatically both globally and -for single color definitions. For example suppose you have a CLI app and a -`--no-color` bool flag. You can easily disable the color output with: - -```go -var flagNoColor = flag.Bool("no-color", false, "Disable color output") - -if *flagNoColor { - color.NoColor = true // disables colorized output -} -``` - -It also has support for single color definitions (local). You can -disable/enable color output on the fly: - -```go -c := color.New(color.FgCyan) -c.Println("Prints cyan text") - -c.DisableColor() -c.Println("This is printed without any color") - -c.EnableColor() -c.Println("This prints again cyan...") -``` - -## GitHub Actions - -To output color in GitHub Actions (or other CI systems that support ANSI colors), make sure to set `color.NoColor = false` so that it bypasses the check for non-tty output streams. - -## Todo - -* Save/Return previous values -* Evaluate fmt.Formatter interface - - -## Credits - - * [Fatih Arslan](https://github.com/fatih) - * Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable) - -## License - -The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details diff --git a/vendor/github.com/fatih/color/color.go b/vendor/github.com/fatih/color/color.go deleted file mode 100644 index 98a60f3c..00000000 --- a/vendor/github.com/fatih/color/color.go +++ /dev/null @@ -1,618 +0,0 @@ -package color - -import ( - "fmt" - "io" - "os" - "strconv" - "strings" - "sync" - - "github.com/mattn/go-colorable" - "github.com/mattn/go-isatty" -) - -var ( - // NoColor defines if the output is colorized or not. It's dynamically set to - // false or true based on the stdout's file descriptor referring to a terminal - // or not. It's also set to true if the NO_COLOR environment variable is - // set (regardless of its value). This is a global option and affects all - // colors. For more control over each color block use the methods - // DisableColor() individually. - NoColor = noColorExists() || os.Getenv("TERM") == "dumb" || - (!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd())) - - // Output defines the standard output of the print functions. By default - // os.Stdout is used. - Output = colorable.NewColorableStdout() - - // Error defines a color supporting writer for os.Stderr. - Error = colorable.NewColorableStderr() - - // colorsCache is used to reduce the count of created Color objects and - // allows to reuse already created objects with required Attribute. - colorsCache = make(map[Attribute]*Color) - colorsCacheMu sync.Mutex // protects colorsCache -) - -// noColorExists returns true if the environment variable NO_COLOR exists. -func noColorExists() bool { - _, exists := os.LookupEnv("NO_COLOR") - return exists -} - -// Color defines a custom color object which is defined by SGR parameters. 
-type Color struct { - params []Attribute - noColor *bool -} - -// Attribute defines a single SGR Code -type Attribute int - -const escape = "\x1b" - -// Base attributes -const ( - Reset Attribute = iota - Bold - Faint - Italic - Underline - BlinkSlow - BlinkRapid - ReverseVideo - Concealed - CrossedOut -) - -// Foreground text colors -const ( - FgBlack Attribute = iota + 30 - FgRed - FgGreen - FgYellow - FgBlue - FgMagenta - FgCyan - FgWhite -) - -// Foreground Hi-Intensity text colors -const ( - FgHiBlack Attribute = iota + 90 - FgHiRed - FgHiGreen - FgHiYellow - FgHiBlue - FgHiMagenta - FgHiCyan - FgHiWhite -) - -// Background text colors -const ( - BgBlack Attribute = iota + 40 - BgRed - BgGreen - BgYellow - BgBlue - BgMagenta - BgCyan - BgWhite -) - -// Background Hi-Intensity text colors -const ( - BgHiBlack Attribute = iota + 100 - BgHiRed - BgHiGreen - BgHiYellow - BgHiBlue - BgHiMagenta - BgHiCyan - BgHiWhite -) - -// New returns a newly created color object. -func New(value ...Attribute) *Color { - c := &Color{ - params: make([]Attribute, 0), - } - - if noColorExists() { - c.noColor = boolPtr(true) - } - - c.Add(value...) - return c -} - -// Set sets the given parameters immediately. It will change the color of -// output with the given SGR parameters until color.Unset() is called. -func Set(p ...Attribute) *Color { - c := New(p...) - c.Set() - return c -} - -// Unset resets all escape attributes and clears the output. Usually should -// be called after Set(). -func Unset() { - if NoColor { - return - } - - fmt.Fprintf(Output, "%s[%dm", escape, Reset) -} - -// Set sets the SGR sequence. -func (c *Color) Set() *Color { - if c.isNoColorSet() { - return c - } - - fmt.Fprintf(Output, c.format()) - return c -} - -func (c *Color) unset() { - if c.isNoColorSet() { - return - } - - Unset() -} - -func (c *Color) setWriter(w io.Writer) *Color { - if c.isNoColorSet() { - return c - } - - fmt.Fprintf(w, c.format()) - return c -} - -func (c *Color) unsetWriter(w io.Writer) { - if c.isNoColorSet() { - return - } - - if NoColor { - return - } - - fmt.Fprintf(w, "%s[%dm", escape, Reset) -} - -// Add is used to chain SGR parameters. Use as many as parameters to combine -// and create custom color objects. Example: Add(color.FgRed, color.Underline). -func (c *Color) Add(value ...Attribute) *Color { - c.params = append(c.params, value...) - return c -} - -func (c *Color) prepend(value Attribute) { - c.params = append(c.params, 0) - copy(c.params[1:], c.params[0:]) - c.params[0] = value -} - -// Fprint formats using the default formats for its operands and writes to w. -// Spaces are added between operands when neither is a string. -// It returns the number of bytes written and any write error encountered. -// On Windows, users should wrap w with colorable.NewColorable() if w is of -// type *os.File. -func (c *Color) Fprint(w io.Writer, a ...interface{}) (n int, err error) { - c.setWriter(w) - defer c.unsetWriter(w) - - return fmt.Fprint(w, a...) -} - -// Print formats using the default formats for its operands and writes to -// standard output. Spaces are added between operands when neither is a -// string. It returns the number of bytes written and any write error -// encountered. This is the standard fmt.Print() method wrapped with the given -// color. -func (c *Color) Print(a ...interface{}) (n int, err error) { - c.Set() - defer c.unset() - - return fmt.Fprint(Output, a...) -} - -// Fprintf formats according to a format specifier and writes to w. 
-// It returns the number of bytes written and any write error encountered. -// On Windows, users should wrap w with colorable.NewColorable() if w is of -// type *os.File. -func (c *Color) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - c.setWriter(w) - defer c.unsetWriter(w) - - return fmt.Fprintf(w, format, a...) -} - -// Printf formats according to a format specifier and writes to standard output. -// It returns the number of bytes written and any write error encountered. -// This is the standard fmt.Printf() method wrapped with the given color. -func (c *Color) Printf(format string, a ...interface{}) (n int, err error) { - c.Set() - defer c.unset() - - return fmt.Fprintf(Output, format, a...) -} - -// Fprintln formats using the default formats for its operands and writes to w. -// Spaces are always added between operands and a newline is appended. -// On Windows, users should wrap w with colorable.NewColorable() if w is of -// type *os.File. -func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - c.setWriter(w) - defer c.unsetWriter(w) - - return fmt.Fprintln(w, a...) -} - -// Println formats using the default formats for its operands and writes to -// standard output. Spaces are always added between operands and a newline is -// appended. It returns the number of bytes written and any write error -// encountered. This is the standard fmt.Print() method wrapped with the given -// color. -func (c *Color) Println(a ...interface{}) (n int, err error) { - c.Set() - defer c.unset() - - return fmt.Fprintln(Output, a...) -} - -// Sprint is just like Print, but returns a string instead of printing it. -func (c *Color) Sprint(a ...interface{}) string { - return c.wrap(fmt.Sprint(a...)) -} - -// Sprintln is just like Println, but returns a string instead of printing it. -func (c *Color) Sprintln(a ...interface{}) string { - return c.wrap(fmt.Sprintln(a...)) -} - -// Sprintf is just like Printf, but returns a string instead of printing it. -func (c *Color) Sprintf(format string, a ...interface{}) string { - return c.wrap(fmt.Sprintf(format, a...)) -} - -// FprintFunc returns a new function that prints the passed arguments as -// colorized with color.Fprint(). -func (c *Color) FprintFunc() func(w io.Writer, a ...interface{}) { - return func(w io.Writer, a ...interface{}) { - c.Fprint(w, a...) - } -} - -// PrintFunc returns a new function that prints the passed arguments as -// colorized with color.Print(). -func (c *Color) PrintFunc() func(a ...interface{}) { - return func(a ...interface{}) { - c.Print(a...) - } -} - -// FprintfFunc returns a new function that prints the passed arguments as -// colorized with color.Fprintf(). -func (c *Color) FprintfFunc() func(w io.Writer, format string, a ...interface{}) { - return func(w io.Writer, format string, a ...interface{}) { - c.Fprintf(w, format, a...) - } -} - -// PrintfFunc returns a new function that prints the passed arguments as -// colorized with color.Printf(). -func (c *Color) PrintfFunc() func(format string, a ...interface{}) { - return func(format string, a ...interface{}) { - c.Printf(format, a...) - } -} - -// FprintlnFunc returns a new function that prints the passed arguments as -// colorized with color.Fprintln(). -func (c *Color) FprintlnFunc() func(w io.Writer, a ...interface{}) { - return func(w io.Writer, a ...interface{}) { - c.Fprintln(w, a...) - } -} - -// PrintlnFunc returns a new function that prints the passed arguments as -// colorized with color.Println(). 
-func (c *Color) PrintlnFunc() func(a ...interface{}) { - return func(a ...interface{}) { - c.Println(a...) - } -} - -// SprintFunc returns a new function that returns colorized strings for the -// given arguments with fmt.Sprint(). Useful to put into or mix into other -// string. Windows users should use this in conjunction with color.Output, example: -// -// put := New(FgYellow).SprintFunc() -// fmt.Fprintf(color.Output, "This is a %s", put("warning")) -func (c *Color) SprintFunc() func(a ...interface{}) string { - return func(a ...interface{}) string { - return c.wrap(fmt.Sprint(a...)) - } -} - -// SprintfFunc returns a new function that returns colorized strings for the -// given arguments with fmt.Sprintf(). Useful to put into or mix into other -// string. Windows users should use this in conjunction with color.Output. -func (c *Color) SprintfFunc() func(format string, a ...interface{}) string { - return func(format string, a ...interface{}) string { - return c.wrap(fmt.Sprintf(format, a...)) - } -} - -// SprintlnFunc returns a new function that returns colorized strings for the -// given arguments with fmt.Sprintln(). Useful to put into or mix into other -// string. Windows users should use this in conjunction with color.Output. -func (c *Color) SprintlnFunc() func(a ...interface{}) string { - return func(a ...interface{}) string { - return c.wrap(fmt.Sprintln(a...)) - } -} - -// sequence returns a formatted SGR sequence to be plugged into a "\x1b[...m" -// an example output might be: "1;36" -> bold cyan -func (c *Color) sequence() string { - format := make([]string, len(c.params)) - for i, v := range c.params { - format[i] = strconv.Itoa(int(v)) - } - - return strings.Join(format, ";") -} - -// wrap wraps the s string with the colors attributes. The string is ready to -// be printed. -func (c *Color) wrap(s string) string { - if c.isNoColorSet() { - return s - } - - return c.format() + s + c.unformat() -} - -func (c *Color) format() string { - return fmt.Sprintf("%s[%sm", escape, c.sequence()) -} - -func (c *Color) unformat() string { - return fmt.Sprintf("%s[%dm", escape, Reset) -} - -// DisableColor disables the color output. Useful to not change any existing -// code and still being able to output. Can be used for flags like -// "--no-color". To enable back use EnableColor() method. -func (c *Color) DisableColor() { - c.noColor = boolPtr(true) -} - -// EnableColor enables the color output. Use it in conjunction with -// DisableColor(). Otherwise this method has no side effects. -func (c *Color) EnableColor() { - c.noColor = boolPtr(false) -} - -func (c *Color) isNoColorSet() bool { - // check first if we have user set action - if c.noColor != nil { - return *c.noColor - } - - // if not return the global option, which is disabled by default - return NoColor -} - -// Equals returns a boolean value indicating whether two colors are equal. 
-func (c *Color) Equals(c2 *Color) bool { - if len(c.params) != len(c2.params) { - return false - } - - for _, attr := range c.params { - if !c2.attrExists(attr) { - return false - } - } - - return true -} - -func (c *Color) attrExists(a Attribute) bool { - for _, attr := range c.params { - if attr == a { - return true - } - } - - return false -} - -func boolPtr(v bool) *bool { - return &v -} - -func getCachedColor(p Attribute) *Color { - colorsCacheMu.Lock() - defer colorsCacheMu.Unlock() - - c, ok := colorsCache[p] - if !ok { - c = New(p) - colorsCache[p] = c - } - - return c -} - -func colorPrint(format string, p Attribute, a ...interface{}) { - c := getCachedColor(p) - - if !strings.HasSuffix(format, "\n") { - format += "\n" - } - - if len(a) == 0 { - c.Print(format) - } else { - c.Printf(format, a...) - } -} - -func colorString(format string, p Attribute, a ...interface{}) string { - c := getCachedColor(p) - - if len(a) == 0 { - return c.SprintFunc()(format) - } - - return c.SprintfFunc()(format, a...) -} - -// Black is a convenient helper function to print with black foreground. A -// newline is appended to format by default. -func Black(format string, a ...interface{}) { colorPrint(format, FgBlack, a...) } - -// Red is a convenient helper function to print with red foreground. A -// newline is appended to format by default. -func Red(format string, a ...interface{}) { colorPrint(format, FgRed, a...) } - -// Green is a convenient helper function to print with green foreground. A -// newline is appended to format by default. -func Green(format string, a ...interface{}) { colorPrint(format, FgGreen, a...) } - -// Yellow is a convenient helper function to print with yellow foreground. -// A newline is appended to format by default. -func Yellow(format string, a ...interface{}) { colorPrint(format, FgYellow, a...) } - -// Blue is a convenient helper function to print with blue foreground. A -// newline is appended to format by default. -func Blue(format string, a ...interface{}) { colorPrint(format, FgBlue, a...) } - -// Magenta is a convenient helper function to print with magenta foreground. -// A newline is appended to format by default. -func Magenta(format string, a ...interface{}) { colorPrint(format, FgMagenta, a...) } - -// Cyan is a convenient helper function to print with cyan foreground. A -// newline is appended to format by default. -func Cyan(format string, a ...interface{}) { colorPrint(format, FgCyan, a...) } - -// White is a convenient helper function to print with white foreground. A -// newline is appended to format by default. -func White(format string, a ...interface{}) { colorPrint(format, FgWhite, a...) } - -// BlackString is a convenient helper function to return a string with black -// foreground. -func BlackString(format string, a ...interface{}) string { return colorString(format, FgBlack, a...) } - -// RedString is a convenient helper function to return a string with red -// foreground. -func RedString(format string, a ...interface{}) string { return colorString(format, FgRed, a...) } - -// GreenString is a convenient helper function to return a string with green -// foreground. -func GreenString(format string, a ...interface{}) string { return colorString(format, FgGreen, a...) } - -// YellowString is a convenient helper function to return a string with yellow -// foreground. -func YellowString(format string, a ...interface{}) string { return colorString(format, FgYellow, a...) 
} - -// BlueString is a convenient helper function to return a string with blue -// foreground. -func BlueString(format string, a ...interface{}) string { return colorString(format, FgBlue, a...) } - -// MagentaString is a convenient helper function to return a string with magenta -// foreground. -func MagentaString(format string, a ...interface{}) string { - return colorString(format, FgMagenta, a...) -} - -// CyanString is a convenient helper function to return a string with cyan -// foreground. -func CyanString(format string, a ...interface{}) string { return colorString(format, FgCyan, a...) } - -// WhiteString is a convenient helper function to return a string with white -// foreground. -func WhiteString(format string, a ...interface{}) string { return colorString(format, FgWhite, a...) } - -// HiBlack is a convenient helper function to print with hi-intensity black foreground. A -// newline is appended to format by default. -func HiBlack(format string, a ...interface{}) { colorPrint(format, FgHiBlack, a...) } - -// HiRed is a convenient helper function to print with hi-intensity red foreground. A -// newline is appended to format by default. -func HiRed(format string, a ...interface{}) { colorPrint(format, FgHiRed, a...) } - -// HiGreen is a convenient helper function to print with hi-intensity green foreground. A -// newline is appended to format by default. -func HiGreen(format string, a ...interface{}) { colorPrint(format, FgHiGreen, a...) } - -// HiYellow is a convenient helper function to print with hi-intensity yellow foreground. -// A newline is appended to format by default. -func HiYellow(format string, a ...interface{}) { colorPrint(format, FgHiYellow, a...) } - -// HiBlue is a convenient helper function to print with hi-intensity blue foreground. A -// newline is appended to format by default. -func HiBlue(format string, a ...interface{}) { colorPrint(format, FgHiBlue, a...) } - -// HiMagenta is a convenient helper function to print with hi-intensity magenta foreground. -// A newline is appended to format by default. -func HiMagenta(format string, a ...interface{}) { colorPrint(format, FgHiMagenta, a...) } - -// HiCyan is a convenient helper function to print with hi-intensity cyan foreground. A -// newline is appended to format by default. -func HiCyan(format string, a ...interface{}) { colorPrint(format, FgHiCyan, a...) } - -// HiWhite is a convenient helper function to print with hi-intensity white foreground. A -// newline is appended to format by default. -func HiWhite(format string, a ...interface{}) { colorPrint(format, FgHiWhite, a...) } - -// HiBlackString is a convenient helper function to return a string with hi-intensity black -// foreground. -func HiBlackString(format string, a ...interface{}) string { - return colorString(format, FgHiBlack, a...) -} - -// HiRedString is a convenient helper function to return a string with hi-intensity red -// foreground. -func HiRedString(format string, a ...interface{}) string { return colorString(format, FgHiRed, a...) } - -// HiGreenString is a convenient helper function to return a string with hi-intensity green -// foreground. -func HiGreenString(format string, a ...interface{}) string { - return colorString(format, FgHiGreen, a...) -} - -// HiYellowString is a convenient helper function to return a string with hi-intensity yellow -// foreground. -func HiYellowString(format string, a ...interface{}) string { - return colorString(format, FgHiYellow, a...) 
-} - -// HiBlueString is a convenient helper function to return a string with hi-intensity blue -// foreground. -func HiBlueString(format string, a ...interface{}) string { return colorString(format, FgHiBlue, a...) } - -// HiMagentaString is a convenient helper function to return a string with hi-intensity magenta -// foreground. -func HiMagentaString(format string, a ...interface{}) string { - return colorString(format, FgHiMagenta, a...) -} - -// HiCyanString is a convenient helper function to return a string with hi-intensity cyan -// foreground. -func HiCyanString(format string, a ...interface{}) string { return colorString(format, FgHiCyan, a...) } - -// HiWhiteString is a convenient helper function to return a string with hi-intensity white -// foreground. -func HiWhiteString(format string, a ...interface{}) string { - return colorString(format, FgHiWhite, a...) -} diff --git a/vendor/github.com/fatih/color/doc.go b/vendor/github.com/fatih/color/doc.go deleted file mode 100644 index 04541de7..00000000 --- a/vendor/github.com/fatih/color/doc.go +++ /dev/null @@ -1,135 +0,0 @@ -/* -Package color is an ANSI color package to output colorized or SGR defined -output to the standard output. The API can be used in several way, pick one -that suits you. - -Use simple and default helper functions with predefined foreground colors: - - color.Cyan("Prints text in cyan.") - - // a newline will be appended automatically - color.Blue("Prints %s in blue.", "text") - - // More default foreground colors.. - color.Red("We have red") - color.Yellow("Yellow color too!") - color.Magenta("And many others ..") - - // Hi-intensity colors - color.HiGreen("Bright green color.") - color.HiBlack("Bright black means gray..") - color.HiWhite("Shiny white color!") - -However there are times where custom color mixes are required. Below are some -examples to create custom color objects and use the print functions of each -separate color object. - - // Create a new color object - c := color.New(color.FgCyan).Add(color.Underline) - c.Println("Prints cyan text with an underline.") - - // Or just add them to New() - d := color.New(color.FgCyan, color.Bold) - d.Printf("This prints bold cyan %s\n", "too!.") - - - // Mix up foreground and background colors, create new mixes! 
- red := color.New(color.FgRed) - - boldRed := red.Add(color.Bold) - boldRed.Println("This will print text in bold red.") - - whiteBackground := red.Add(color.BgWhite) - whiteBackground.Println("Red text with White background.") - - // Use your own io.Writer output - color.New(color.FgBlue).Fprintln(myWriter, "blue color!") - - blue := color.New(color.FgBlue) - blue.Fprint(myWriter, "This will print text in blue.") - -You can create PrintXxx functions to simplify even more: - - // Create a custom print function for convenient - red := color.New(color.FgRed).PrintfFunc() - red("warning") - red("error: %s", err) - - // Mix up multiple attributes - notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() - notice("don't forget this...") - -You can also FprintXxx functions to pass your own io.Writer: - - blue := color.New(FgBlue).FprintfFunc() - blue(myWriter, "important notice: %s", stars) - - // Mix up with multiple attributes - success := color.New(color.Bold, color.FgGreen).FprintlnFunc() - success(myWriter, don't forget this...") - - -Or create SprintXxx functions to mix strings with other non-colorized strings: - - yellow := New(FgYellow).SprintFunc() - red := New(FgRed).SprintFunc() - - fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error")) - - info := New(FgWhite, BgGreen).SprintFunc() - fmt.Printf("this %s rocks!\n", info("package")) - -Windows support is enabled by default. All Print functions work as intended. -However only for color.SprintXXX functions, user should use fmt.FprintXXX and -set the output to color.Output: - - fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) - - info := New(FgWhite, BgGreen).SprintFunc() - fmt.Fprintf(color.Output, "this %s rocks!\n", info("package")) - -Using with existing code is possible. Just use the Set() method to set the -standard output to the given parameters. That way a rewrite of an existing -code is not required. - - // Use handy standard colors. - color.Set(color.FgYellow) - - fmt.Println("Existing text will be now in Yellow") - fmt.Printf("This one %s\n", "too") - - color.Unset() // don't forget to unset - - // You can mix up parameters - color.Set(color.FgMagenta, color.Bold) - defer color.Unset() // use it in your function - - fmt.Println("All text will be now bold magenta.") - -There might be a case where you want to disable color output (for example to -pipe the standard output of your app to somewhere else). `Color` has support to -disable colors both globally and for single color definition. For example -suppose you have a CLI app and a `--no-color` bool flag. You can easily disable -the color output with: - - var flagNoColor = flag.Bool("no-color", false, "Disable color output") - - if *flagNoColor { - color.NoColor = true // disables colorized output - } - -You can also disable the color by setting the NO_COLOR environment variable to any value. - -It also has support for single color definitions (local). 
You can -disable/enable color output on the fly: - - c := color.New(color.FgCyan) - c.Println("Prints cyan text") - - c.DisableColor() - c.Println("This is printed without any color") - - c.EnableColor() - c.Println("This prints again cyan...") -*/ -package color diff --git a/vendor/github.com/go-jose/go-jose/v3/.gitignore b/vendor/github.com/go-jose/go-jose/v3/.gitignore new file mode 100644 index 00000000..eb29ebae --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/.gitignore @@ -0,0 +1,2 @@ +jose-util/jose-util +jose-util.t.err \ No newline at end of file diff --git a/vendor/github.com/go-jose/go-jose/v3/.golangci.yml b/vendor/github.com/go-jose/go-jose/v3/.golangci.yml new file mode 100644 index 00000000..2a577a8f --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/.golangci.yml @@ -0,0 +1,53 @@ +# https://github.com/golangci/golangci-lint + +run: + skip-files: + - doc_test.go + modules-download-mode: readonly + +linters: + enable-all: true + disable: + - gochecknoglobals + - goconst + - lll + - maligned + - nakedret + - scopelint + - unparam + - funlen # added in 1.18 (requires go-jose changes before it can be enabled) + +linters-settings: + gocyclo: + min-complexity: 35 + +issues: + exclude-rules: + - text: "don't use ALL_CAPS in Go names" + linters: + - golint + - text: "hardcoded credentials" + linters: + - gosec + - text: "weak cryptographic primitive" + linters: + - gosec + - path: json/ + linters: + - dupl + - errcheck + - gocritic + - gocyclo + - golint + - govet + - ineffassign + - staticcheck + - structcheck + - stylecheck + - unused + - path: _test\.go + linters: + - scopelint + - path: jwk.go + linters: + - gocyclo diff --git a/vendor/github.com/go-jose/go-jose/v3/.travis.yml b/vendor/github.com/go-jose/go-jose/v3/.travis.yml new file mode 100644 index 00000000..48de631b --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/.travis.yml @@ -0,0 +1,33 @@ +language: go + +matrix: + fast_finish: true + allow_failures: + - go: tip + +go: + - "1.13.x" + - "1.14.x" + - tip + +before_script: + - export PATH=$HOME/.local/bin:$PATH + +before_install: + - go get -u github.com/mattn/goveralls github.com/wadey/gocovmerge + - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.18.0 + - pip install cram --user + +script: + - go test -v -covermode=count -coverprofile=profile.cov . + - go test -v -covermode=count -coverprofile=cryptosigner/profile.cov ./cryptosigner + - go test -v -covermode=count -coverprofile=cipher/profile.cov ./cipher + - go test -v -covermode=count -coverprofile=jwt/profile.cov ./jwt + - go test -v ./json # no coverage for forked encoding/json package + - golangci-lint run + - cd jose-util && go build && PATH=$PWD:$PATH cram -v jose-util.t # cram tests jose-util + - cd .. + +after_success: + - gocovmerge *.cov */*.cov > merged.coverprofile + - goveralls -coverprofile merged.coverprofile -service=travis-ci diff --git a/vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md b/vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md new file mode 100644 index 00000000..3305db0f --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md @@ -0,0 +1,10 @@ +Serious about security +====================== + +Square recognizes the important contributions the security research community +can make. We therefore encourage reporting security issues with the code +contained in this repository. + +If you believe you have discovered a security vulnerability, please follow the +guidelines at . 
+ diff --git a/vendor/github.com/go-jose/go-jose/v3/CONTRIBUTING.md b/vendor/github.com/go-jose/go-jose/v3/CONTRIBUTING.md new file mode 100644 index 00000000..b63e1f8f --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/CONTRIBUTING.md @@ -0,0 +1,15 @@ +# Contributing + +If you would like to contribute code to go-jose you can do so through GitHub by +forking the repository and sending a pull request. + +When submitting code, please make every effort to follow existing conventions +and style in order to keep the code as readable as possible. Please also make +sure all tests pass by running `go test`, and format your code with `go fmt`. +We also recommend using `golint` and `errcheck`. + +Before your code can be accepted into the project you must also sign the +Individual Contributor License Agreement. We use [cla-assistant.io][1] and you +will be prompted to sign once a pull request is opened. + +[1]: https://cla-assistant.io/ diff --git a/vendor/github.com/go-jose/go-jose/v3/LICENSE b/vendor/github.com/go-jose/go-jose/v3/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-jose/go-jose/v3/README.md b/vendor/github.com/go-jose/go-jose/v3/README.md new file mode 100644 index 00000000..b90c7e5c --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/README.md @@ -0,0 +1,122 @@ +# Go JOSE + +[![godoc](http://img.shields.io/badge/godoc-jose_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2) +[![godoc](http://img.shields.io/badge/godoc-jwt_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2/jwt) +[![license](http://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/go-jose/go-jose/master/LICENSE) +[![build](https://travis-ci.org/go-jose/go-jose.svg?branch=master)](https://travis-ci.org/go-jose/go-jose) +[![coverage](https://coveralls.io/repos/github/go-jose/go-jose/badge.svg?branch=master)](https://coveralls.io/r/go-jose/go-jose) + +Package jose aims to provide an implementation of the Javascript Object Signing +and Encryption set of standards. 
This includes support for JSON Web Encryption, +JSON Web Signature, and JSON Web Token standards. + +**Disclaimer**: This library contains encryption software that is subject to +the U.S. Export Administration Regulations. You may not export, re-export, +transfer or download this code or any part of it in violation of any United +States law, directive or regulation. In particular this software may not be +exported or re-exported in any form or on any media to Iran, North Sudan, +Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any +US maintained blocked list. + +## Overview + +The implementation follows the +[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516) (RFC 7516), +[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515) (RFC 7515), and +[JSON Web Token](http://dx.doi.org/10.17487/RFC7519) (RFC 7519) specifications. +Tables of supported algorithms are shown below. The library supports both +the compact and JWS/JWE JSON Serialization formats, and has optional support for +multiple recipients. It also comes with a small command-line utility +([`jose-util`](https://github.com/go-jose/go-jose/tree/master/jose-util)) +for dealing with JOSE messages in a shell. + +**Note**: We use a forked version of the `encoding/json` package from the Go +standard library which uses case-sensitive matching for member names (instead +of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)). +This is to avoid differences in interpretation of messages between go-jose and +libraries in other languages. + +### Versions + +[Version 2](https://gopkg.in/go-jose/go-jose.v2) +([branch](https://github.com/go-jose/go-jose/tree/v2), +[doc](https://godoc.org/gopkg.in/go-jose/go-jose.v2)) is the current stable version: + + import "gopkg.in/go-jose/go-jose.v2" + +[Version 3](https://github.com/go-jose/go-jose) +([branch](https://github.com/go-jose/go-jose/tree/master), +[doc](https://godoc.org/github.com/go-jose/go-jose)) is the under development/unstable version (not released yet): + + import "github.com/go-jose/go-jose/v3" + +All new feature development takes place on the `master` branch, which we are +preparing to release as version 3 soon. Version 2 will continue to receive +critical bug and security fixes. Note that starting with version 3 we are +using Go modules for versioning instead of `gopkg.in` as before. Version 3 also will require Go version 1.13 or higher. + +Version 1 (on the `v1` branch) is frozen and not supported anymore. + +### Supported algorithms + +See below for a table of supported algorithms. Algorithm identifiers match +the names in the [JSON Web Algorithms](http://dx.doi.org/10.17487/RFC7518) +standard where possible. The Godoc reference has a list of constants. + + Key encryption | Algorithm identifier(s) + :------------------------- | :------------------------------ + RSA-PKCS#1v1.5 | RSA1_5 + RSA-OAEP | RSA-OAEP, RSA-OAEP-256 + AES key wrap | A128KW, A192KW, A256KW + AES-GCM key wrap | A128GCMKW, A192GCMKW, A256GCMKW + ECDH-ES + AES key wrap | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW + ECDH-ES (direct) | ECDH-ES1 + Direct encryption | dir1 + +1. Not supported in multi-recipient mode + + Signing / MAC | Algorithm identifier(s) + :------------------------- | :------------------------------ + RSASSA-PKCS#1v1.5 | RS256, RS384, RS512 + RSASSA-PSS | PS256, PS384, PS512 + HMAC | HS256, HS384, HS512 + ECDSA | ES256, ES384, ES512 + Ed25519 | EdDSA2 + +2. 
Only available in version 2 of the package + + Content encryption | Algorithm identifier(s) + :------------------------- | :------------------------------ + AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512 + AES-GCM | A128GCM, A192GCM, A256GCM + + Compression | Algorithm identifiers(s) + :------------------------- | ------------------------------- + DEFLATE (RFC 1951) | DEF + +### Supported key types + +See below for a table of supported key types. These are understood by the +library, and can be passed to corresponding functions such as `NewEncrypter` or +`NewSigner`. Each of these keys can also be wrapped in a JWK if desired, which +allows attaching a key id. + + Algorithm(s) | Corresponding types + :------------------------- | ------------------------------- + RSA | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey) + ECDH, ECDSA | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey) + EdDSA1 | [ed25519.PublicKey](https://godoc.org/pkg/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://godoc.org/pkg/crypto/ed25519#PrivateKey) + AES, HMAC | []byte + +1. Only available in version 2 or later of the package + +## Examples + +[![godoc](http://img.shields.io/badge/godoc-jose_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2) +[![godoc](http://img.shields.io/badge/godoc-jwt_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2/jwt) + +Examples can be found in the Godoc +reference for this package. The +[`jose-util`](https://github.com/go-jose/go-jose/tree/master/jose-util) +subdirectory also contains a small command-line utility which might be useful +as an example as well. diff --git a/vendor/github.com/go-jose/go-jose/v3/asymmetric.go b/vendor/github.com/go-jose/go-jose/v3/asymmetric.go new file mode 100644 index 00000000..78abc326 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/asymmetric.go @@ -0,0 +1,592 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
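The README above notes that raw key types such as *rsa.PrivateKey or *ecdsa.PrivateKey can be handed straight to `NewSigner` or `NewEncrypter`. A rough, illustrative sketch of that signing round trip under the v3 module path vendored by this patch (ES256 key generated on the spot; not taken from the vendored sources themselves):

    package main

    import (
        "crypto/ecdsa"
        "crypto/elliptic"
        "crypto/rand"
        "fmt"

        jose "github.com/go-jose/go-jose/v3"
    )

    func main() {
        // Illustrative only: sign and verify a payload with ES256.
        key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
        if err != nil {
            panic(err)
        }

        signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.ES256, Key: key}, nil)
        if err != nil {
            panic(err)
        }

        obj, err := signer.Sign([]byte("hello JOSE"))
        if err != nil {
            panic(err)
        }

        // Compact serialization: the familiar three-part JWS string.
        compact, err := obj.CompactSerialize()
        if err != nil {
            panic(err)
        }

        parsed, err := jose.ParseSigned(compact)
        if err != nil {
            panic(err)
        }

        // Verify returns the payload only if the signature checks out.
        payload, err := parsed.Verify(&key.PublicKey)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(payload)) // hello JOSE
    }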
+ */ + +package jose + +import ( + "crypto" + "crypto/aes" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "errors" + "fmt" + "math/big" + + josecipher "github.com/go-jose/go-jose/v3/cipher" + "github.com/go-jose/go-jose/v3/json" +) + +// A generic RSA-based encrypter/verifier +type rsaEncrypterVerifier struct { + publicKey *rsa.PublicKey +} + +// A generic RSA-based decrypter/signer +type rsaDecrypterSigner struct { + privateKey *rsa.PrivateKey +} + +// A generic EC-based encrypter/verifier +type ecEncrypterVerifier struct { + publicKey *ecdsa.PublicKey +} + +type edEncrypterVerifier struct { + publicKey ed25519.PublicKey +} + +// A key generator for ECDH-ES +type ecKeyGenerator struct { + size int + algID string + publicKey *ecdsa.PublicKey +} + +// A generic EC-based decrypter/signer +type ecDecrypterSigner struct { + privateKey *ecdsa.PrivateKey +} + +type edDecrypterSigner struct { + privateKey ed25519.PrivateKey +} + +// newRSARecipient creates recipientKeyInfo based on the given key. +func newRSARecipient(keyAlg KeyAlgorithm, publicKey *rsa.PublicKey) (recipientKeyInfo, error) { + // Verify that key management algorithm is supported by this encrypter + switch keyAlg { + case RSA1_5, RSA_OAEP, RSA_OAEP_256: + default: + return recipientKeyInfo{}, ErrUnsupportedAlgorithm + } + + if publicKey == nil { + return recipientKeyInfo{}, errors.New("invalid public key") + } + + return recipientKeyInfo{ + keyAlg: keyAlg, + keyEncrypter: &rsaEncrypterVerifier{ + publicKey: publicKey, + }, + }, nil +} + +// newRSASigner creates a recipientSigInfo based on the given key. +func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipientSigInfo, error) { + // Verify that key management algorithm is supported by this encrypter + switch sigAlg { + case RS256, RS384, RS512, PS256, PS384, PS512: + default: + return recipientSigInfo{}, ErrUnsupportedAlgorithm + } + + if privateKey == nil { + return recipientSigInfo{}, errors.New("invalid private key") + } + + return recipientSigInfo{ + sigAlg: sigAlg, + publicKey: staticPublicKey(&JSONWebKey{ + Key: privateKey.Public(), + }), + signer: &rsaDecrypterSigner{ + privateKey: privateKey, + }, + }, nil +} + +func newEd25519Signer(sigAlg SignatureAlgorithm, privateKey ed25519.PrivateKey) (recipientSigInfo, error) { + if sigAlg != EdDSA { + return recipientSigInfo{}, ErrUnsupportedAlgorithm + } + + if privateKey == nil { + return recipientSigInfo{}, errors.New("invalid private key") + } + return recipientSigInfo{ + sigAlg: sigAlg, + publicKey: staticPublicKey(&JSONWebKey{ + Key: privateKey.Public(), + }), + signer: &edDecrypterSigner{ + privateKey: privateKey, + }, + }, nil +} + +// newECDHRecipient creates recipientKeyInfo based on the given key. +func newECDHRecipient(keyAlg KeyAlgorithm, publicKey *ecdsa.PublicKey) (recipientKeyInfo, error) { + // Verify that key management algorithm is supported by this encrypter + switch keyAlg { + case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: + default: + return recipientKeyInfo{}, ErrUnsupportedAlgorithm + } + + if publicKey == nil || !publicKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) { + return recipientKeyInfo{}, errors.New("invalid public key") + } + + return recipientKeyInfo{ + keyAlg: keyAlg, + keyEncrypter: &ecEncrypterVerifier{ + publicKey: publicKey, + }, + }, nil +} + +// newECDSASigner creates a recipientSigInfo based on the given key. 
+func newECDSASigner(sigAlg SignatureAlgorithm, privateKey *ecdsa.PrivateKey) (recipientSigInfo, error) { + // Verify that key management algorithm is supported by this encrypter + switch sigAlg { + case ES256, ES384, ES512: + default: + return recipientSigInfo{}, ErrUnsupportedAlgorithm + } + + if privateKey == nil { + return recipientSigInfo{}, errors.New("invalid private key") + } + + return recipientSigInfo{ + sigAlg: sigAlg, + publicKey: staticPublicKey(&JSONWebKey{ + Key: privateKey.Public(), + }), + signer: &ecDecrypterSigner{ + privateKey: privateKey, + }, + }, nil +} + +// Encrypt the given payload and update the object. +func (ctx rsaEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) { + encryptedKey, err := ctx.encrypt(cek, alg) + if err != nil { + return recipientInfo{}, err + } + + return recipientInfo{ + encryptedKey: encryptedKey, + header: &rawHeader{}, + }, nil +} + +// Encrypt the given payload. Based on the key encryption algorithm, +// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256). +func (ctx rsaEncrypterVerifier) encrypt(cek []byte, alg KeyAlgorithm) ([]byte, error) { + switch alg { + case RSA1_5: + return rsa.EncryptPKCS1v15(RandReader, ctx.publicKey, cek) + case RSA_OAEP: + return rsa.EncryptOAEP(sha1.New(), RandReader, ctx.publicKey, cek, []byte{}) + case RSA_OAEP_256: + return rsa.EncryptOAEP(sha256.New(), RandReader, ctx.publicKey, cek, []byte{}) + } + + return nil, ErrUnsupportedAlgorithm +} + +// Decrypt the given payload and return the content encryption key. +func (ctx rsaDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { + return ctx.decrypt(recipient.encryptedKey, headers.getAlgorithm(), generator) +} + +// Decrypt the given payload. Based on the key encryption algorithm, +// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256). +func (ctx rsaDecrypterSigner) decrypt(jek []byte, alg KeyAlgorithm, generator keyGenerator) ([]byte, error) { + // Note: The random reader on decrypt operations is only used for blinding, + // so stubbing is meanlingless (hence the direct use of rand.Reader). + switch alg { + case RSA1_5: + defer func() { + // DecryptPKCS1v15SessionKey sometimes panics on an invalid payload + // because of an index out of bounds error, which we want to ignore. + // This has been fixed in Go 1.3.1 (released 2014/08/13), the recover() + // only exists for preventing crashes with unpatched versions. + // See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k + // See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33 + _ = recover() + }() + + // Perform some input validation. + keyBytes := ctx.privateKey.PublicKey.N.BitLen() / 8 + if keyBytes != len(jek) { + // Input size is incorrect, the encrypted payload should always match + // the size of the public modulus (e.g. using a 2048 bit key will + // produce 256 bytes of output). Reject this since it's invalid input. + return nil, ErrCryptoFailure + } + + cek, _, err := generator.genKey() + if err != nil { + return nil, ErrCryptoFailure + } + + // When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to + // prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing + // the Million Message Attack on Cryptographic Message Syntax". We are + // therefore deliberately ignoring errors here. 
+ _ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, ctx.privateKey, jek, cek) + + return cek, nil + case RSA_OAEP: + // Use rand.Reader for RSA blinding + return rsa.DecryptOAEP(sha1.New(), rand.Reader, ctx.privateKey, jek, []byte{}) + case RSA_OAEP_256: + // Use rand.Reader for RSA blinding + return rsa.DecryptOAEP(sha256.New(), rand.Reader, ctx.privateKey, jek, []byte{}) + } + + return nil, ErrUnsupportedAlgorithm +} + +// Sign the given payload +func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { + var hash crypto.Hash + + switch alg { + case RS256, PS256: + hash = crypto.SHA256 + case RS384, PS384: + hash = crypto.SHA384 + case RS512, PS512: + hash = crypto.SHA512 + default: + return Signature{}, ErrUnsupportedAlgorithm + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + var out []byte + var err error + + switch alg { + case RS256, RS384, RS512: + out, err = rsa.SignPKCS1v15(RandReader, ctx.privateKey, hash, hashed) + case PS256, PS384, PS512: + out, err = rsa.SignPSS(RandReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }) + } + + if err != nil { + return Signature{}, err + } + + return Signature{ + Signature: out, + protected: &rawHeader{}, + }, nil +} + +// Verify the given payload +func (ctx rsaEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { + var hash crypto.Hash + + switch alg { + case RS256, PS256: + hash = crypto.SHA256 + case RS384, PS384: + hash = crypto.SHA384 + case RS512, PS512: + hash = crypto.SHA512 + default: + return ErrUnsupportedAlgorithm + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + switch alg { + case RS256, RS384, RS512: + return rsa.VerifyPKCS1v15(ctx.publicKey, hash, hashed, signature) + case PS256, PS384, PS512: + return rsa.VerifyPSS(ctx.publicKey, hash, hashed, signature, nil) + } + + return ErrUnsupportedAlgorithm +} + +// Encrypt the given payload and update the object. +func (ctx ecEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) { + switch alg { + case ECDH_ES: + // ECDH-ES mode doesn't wrap a key, the shared secret is used directly as the key. 
+ return recipientInfo{ + header: &rawHeader{}, + }, nil + case ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: + default: + return recipientInfo{}, ErrUnsupportedAlgorithm + } + + generator := ecKeyGenerator{ + algID: string(alg), + publicKey: ctx.publicKey, + } + + switch alg { + case ECDH_ES_A128KW: + generator.size = 16 + case ECDH_ES_A192KW: + generator.size = 24 + case ECDH_ES_A256KW: + generator.size = 32 + } + + kek, header, err := generator.genKey() + if err != nil { + return recipientInfo{}, err + } + + block, err := aes.NewCipher(kek) + if err != nil { + return recipientInfo{}, err + } + + jek, err := josecipher.KeyWrap(block, cek) + if err != nil { + return recipientInfo{}, err + } + + return recipientInfo{ + encryptedKey: jek, + header: &header, + }, nil +} + +// Get key size for EC key generator +func (ctx ecKeyGenerator) keySize() int { + return ctx.size +} + +// Get a content encryption key for ECDH-ES +func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) { + priv, err := ecdsa.GenerateKey(ctx.publicKey.Curve, RandReader) + if err != nil { + return nil, rawHeader{}, err + } + + out := josecipher.DeriveECDHES(ctx.algID, []byte{}, []byte{}, priv, ctx.publicKey, ctx.size) + + b, err := json.Marshal(&JSONWebKey{ + Key: &priv.PublicKey, + }) + if err != nil { + return nil, nil, err + } + + headers := rawHeader{ + headerEPK: makeRawMessage(b), + } + + return out, headers, nil +} + +// Decrypt the given payload and return the content encryption key. +func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { + epk, err := headers.getEPK() + if err != nil { + return nil, errors.New("go-jose/go-jose: invalid epk header") + } + if epk == nil { + return nil, errors.New("go-jose/go-jose: missing epk header") + } + + publicKey, ok := epk.Key.(*ecdsa.PublicKey) + if publicKey == nil || !ok { + return nil, errors.New("go-jose/go-jose: invalid epk header") + } + + if !ctx.privateKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) { + return nil, errors.New("go-jose/go-jose: invalid public key in epk header") + } + + apuData, err := headers.getAPU() + if err != nil { + return nil, errors.New("go-jose/go-jose: invalid apu header") + } + apvData, err := headers.getAPV() + if err != nil { + return nil, errors.New("go-jose/go-jose: invalid apv header") + } + + deriveKey := func(algID string, size int) []byte { + return josecipher.DeriveECDHES(algID, apuData.bytes(), apvData.bytes(), ctx.privateKey, publicKey, size) + } + + var keySize int + + algorithm := headers.getAlgorithm() + switch algorithm { + case ECDH_ES: + // ECDH-ES uses direct key agreement, no key unwrapping necessary. 
+ return deriveKey(string(headers.getEncryption()), generator.keySize()), nil + case ECDH_ES_A128KW: + keySize = 16 + case ECDH_ES_A192KW: + keySize = 24 + case ECDH_ES_A256KW: + keySize = 32 + default: + return nil, ErrUnsupportedAlgorithm + } + + key := deriveKey(string(algorithm), keySize) + block, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + return josecipher.KeyUnwrap(block, recipient.encryptedKey) +} + +func (ctx edDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { + if alg != EdDSA { + return Signature{}, ErrUnsupportedAlgorithm + } + + sig, err := ctx.privateKey.Sign(RandReader, payload, crypto.Hash(0)) + if err != nil { + return Signature{}, err + } + + return Signature{ + Signature: sig, + protected: &rawHeader{}, + }, nil +} + +func (ctx edEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { + if alg != EdDSA { + return ErrUnsupportedAlgorithm + } + ok := ed25519.Verify(ctx.publicKey, payload, signature) + if !ok { + return errors.New("go-jose/go-jose: ed25519 signature failed to verify") + } + return nil +} + +// Sign the given payload +func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { + var expectedBitSize int + var hash crypto.Hash + + switch alg { + case ES256: + expectedBitSize = 256 + hash = crypto.SHA256 + case ES384: + expectedBitSize = 384 + hash = crypto.SHA384 + case ES512: + expectedBitSize = 521 + hash = crypto.SHA512 + } + + curveBits := ctx.privateKey.Curve.Params().BitSize + if expectedBitSize != curveBits { + return Signature{}, fmt.Errorf("go-jose/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits) + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + r, s, err := ecdsa.Sign(RandReader, ctx.privateKey, hashed) + if err != nil { + return Signature{}, err + } + + keyBytes := curveBits / 8 + if curveBits%8 > 0 { + keyBytes++ + } + + // We serialize the outputs (r and s) into big-endian byte arrays and pad + // them with zeros on the left to make sure the sizes work out. Both arrays + // must be keyBytes long, and the output must be 2*keyBytes long. + rBytes := r.Bytes() + rBytesPadded := make([]byte, keyBytes) + copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) + + sBytes := s.Bytes() + sBytesPadded := make([]byte, keyBytes) + copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) + + out := append(rBytesPadded, sBytesPadded...) 
+ + return Signature{ + Signature: out, + protected: &rawHeader{}, + }, nil +} + +// Verify the given payload +func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { + var keySize int + var hash crypto.Hash + + switch alg { + case ES256: + keySize = 32 + hash = crypto.SHA256 + case ES384: + keySize = 48 + hash = crypto.SHA384 + case ES512: + keySize = 66 + hash = crypto.SHA512 + default: + return ErrUnsupportedAlgorithm + } + + if len(signature) != 2*keySize { + return fmt.Errorf("go-jose/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize) + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + r := big.NewInt(0).SetBytes(signature[:keySize]) + s := big.NewInt(0).SetBytes(signature[keySize:]) + + match := ecdsa.Verify(ctx.publicKey, hashed, r, s) + if !match { + return errors.New("go-jose/go-jose: ecdsa signature failed to verify") + } + + return nil +} diff --git a/vendor/github.com/go-jose/go-jose/v3/cipher/cbc_hmac.go b/vendor/github.com/go-jose/go-jose/v3/cipher/cbc_hmac.go new file mode 100644 index 00000000..af029cec --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/cipher/cbc_hmac.go @@ -0,0 +1,196 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "bytes" + "crypto/cipher" + "crypto/hmac" + "crypto/sha256" + "crypto/sha512" + "crypto/subtle" + "encoding/binary" + "errors" + "hash" +) + +const ( + nonceBytes = 16 +) + +// NewCBCHMAC instantiates a new AEAD based on CBC+HMAC. +func NewCBCHMAC(key []byte, newBlockCipher func([]byte) (cipher.Block, error)) (cipher.AEAD, error) { + keySize := len(key) / 2 + integrityKey := key[:keySize] + encryptionKey := key[keySize:] + + blockCipher, err := newBlockCipher(encryptionKey) + if err != nil { + return nil, err + } + + var hash func() hash.Hash + switch keySize { + case 16: + hash = sha256.New + case 24: + hash = sha512.New384 + case 32: + hash = sha512.New + } + + return &cbcAEAD{ + hash: hash, + blockCipher: blockCipher, + authtagBytes: keySize, + integrityKey: integrityKey, + }, nil +} + +// An AEAD based on CBC+HMAC +type cbcAEAD struct { + hash func() hash.Hash + authtagBytes int + integrityKey []byte + blockCipher cipher.Block +} + +func (ctx *cbcAEAD) NonceSize() int { + return nonceBytes +} + +func (ctx *cbcAEAD) Overhead() int { + // Maximum overhead is block size (for padding) plus auth tag length, where + // the length of the auth tag is equivalent to the key size. + return ctx.blockCipher.BlockSize() + ctx.authtagBytes +} + +// Seal encrypts and authenticates the plaintext. +func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte { + // Output buffer -- must take care not to mangle plaintext input. 
+ ciphertext := make([]byte, uint64(len(plaintext))+uint64(ctx.Overhead()))[:len(plaintext)] + copy(ciphertext, plaintext) + ciphertext = padBuffer(ciphertext, ctx.blockCipher.BlockSize()) + + cbc := cipher.NewCBCEncrypter(ctx.blockCipher, nonce) + + cbc.CryptBlocks(ciphertext, ciphertext) + authtag := ctx.computeAuthTag(data, nonce, ciphertext) + + ret, out := resize(dst, uint64(len(dst))+uint64(len(ciphertext))+uint64(len(authtag))) + copy(out, ciphertext) + copy(out[len(ciphertext):], authtag) + + return ret +} + +// Open decrypts and authenticates the ciphertext. +func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) { + if len(ciphertext) < ctx.authtagBytes { + return nil, errors.New("go-jose/go-jose: invalid ciphertext (too short)") + } + + offset := len(ciphertext) - ctx.authtagBytes + expectedTag := ctx.computeAuthTag(data, nonce, ciphertext[:offset]) + match := subtle.ConstantTimeCompare(expectedTag, ciphertext[offset:]) + if match != 1 { + return nil, errors.New("go-jose/go-jose: invalid ciphertext (auth tag mismatch)") + } + + cbc := cipher.NewCBCDecrypter(ctx.blockCipher, nonce) + + // Make copy of ciphertext buffer, don't want to modify in place + buffer := append([]byte{}, ciphertext[:offset]...) + + if len(buffer)%ctx.blockCipher.BlockSize() > 0 { + return nil, errors.New("go-jose/go-jose: invalid ciphertext (invalid length)") + } + + cbc.CryptBlocks(buffer, buffer) + + // Remove padding + plaintext, err := unpadBuffer(buffer, ctx.blockCipher.BlockSize()) + if err != nil { + return nil, err + } + + ret, out := resize(dst, uint64(len(dst))+uint64(len(plaintext))) + copy(out, plaintext) + + return ret, nil +} + +// Compute an authentication tag +func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte { + buffer := make([]byte, uint64(len(aad))+uint64(len(nonce))+uint64(len(ciphertext))+8) + n := 0 + n += copy(buffer, aad) + n += copy(buffer[n:], nonce) + n += copy(buffer[n:], ciphertext) + binary.BigEndian.PutUint64(buffer[n:], uint64(len(aad))*8) + + // According to documentation, Write() on hash.Hash never fails. + hmac := hmac.New(ctx.hash, ctx.integrityKey) + _, _ = hmac.Write(buffer) + + return hmac.Sum(nil)[:ctx.authtagBytes] +} + +// resize ensures that the given slice has a capacity of at least n bytes. +// If the capacity of the slice is less than n, a new slice is allocated +// and the existing data will be copied. 
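A rough usage sketch of the AEAD defined above, assuming the package is imported under the josecipher alias used elsewhere in this patch. Per the key-splitting logic in NewCBCHMAC, a 32-byte key yields a 16-byte HMAC-SHA-256 key and a 16-byte AES-128-CBC key:

    package main

    import (
        "crypto/aes"
        "crypto/rand"
        "fmt"

        josecipher "github.com/go-jose/go-jose/v3/cipher"
    )

    func main() {
        // 32 bytes total: first half is the HMAC key, second half the AES key.
        key := make([]byte, 32)
        if _, err := rand.Read(key); err != nil {
            panic(err)
        }

        aead, err := josecipher.NewCBCHMAC(key, aes.NewCipher)
        if err != nil {
            panic(err)
        }

        // CBC mode uses a 16-byte IV as the nonce.
        nonce := make([]byte, aead.NonceSize())
        if _, err := rand.Read(nonce); err != nil {
            panic(err)
        }

        aad := []byte("header")
        ciphertext := aead.Seal(nil, nonce, []byte("attack at dawn"), aad)

        plaintext, err := aead.Open(nil, nonce, ciphertext, aad)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(plaintext)) // attack at dawn
    }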
+func resize(in []byte, n uint64) (head, tail []byte) { + if uint64(cap(in)) >= n { + head = in[:n] + } else { + head = make([]byte, n) + copy(head, in) + } + + tail = head[len(in):] + return +} + +// Apply padding +func padBuffer(buffer []byte, blockSize int) []byte { + missing := blockSize - (len(buffer) % blockSize) + ret, out := resize(buffer, uint64(len(buffer))+uint64(missing)) + padding := bytes.Repeat([]byte{byte(missing)}, missing) + copy(out, padding) + return ret +} + +// Remove padding +func unpadBuffer(buffer []byte, blockSize int) ([]byte, error) { + if len(buffer)%blockSize != 0 { + return nil, errors.New("go-jose/go-jose: invalid padding") + } + + last := buffer[len(buffer)-1] + count := int(last) + + if count == 0 || count > blockSize || count > len(buffer) { + return nil, errors.New("go-jose/go-jose: invalid padding") + } + + padding := bytes.Repeat([]byte{last}, count) + if !bytes.HasSuffix(buffer, padding) { + return nil, errors.New("go-jose/go-jose: invalid padding") + } + + return buffer[:len(buffer)-count], nil +} diff --git a/vendor/github.com/go-jose/go-jose/v3/cipher/concat_kdf.go b/vendor/github.com/go-jose/go-jose/v3/cipher/concat_kdf.go new file mode 100644 index 00000000..f62c3bdb --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/cipher/concat_kdf.go @@ -0,0 +1,75 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "crypto" + "encoding/binary" + "hash" + "io" +) + +type concatKDF struct { + z, info []byte + i uint32 + cache []byte + hasher hash.Hash +} + +// NewConcatKDF builds a KDF reader based on the given inputs. +func NewConcatKDF(hash crypto.Hash, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo []byte) io.Reader { + buffer := make([]byte, uint64(len(algID))+uint64(len(ptyUInfo))+uint64(len(ptyVInfo))+uint64(len(supPubInfo))+uint64(len(supPrivInfo))) + n := 0 + n += copy(buffer, algID) + n += copy(buffer[n:], ptyUInfo) + n += copy(buffer[n:], ptyVInfo) + n += copy(buffer[n:], supPubInfo) + copy(buffer[n:], supPrivInfo) + + hasher := hash.New() + + return &concatKDF{ + z: z, + info: buffer, + hasher: hasher, + cache: []byte{}, + i: 1, + } +} + +func (ctx *concatKDF) Read(out []byte) (int, error) { + copied := copy(out, ctx.cache) + ctx.cache = ctx.cache[copied:] + + for copied < len(out) { + ctx.hasher.Reset() + + // Write on a hash.Hash never fails + _ = binary.Write(ctx.hasher, binary.BigEndian, ctx.i) + _, _ = ctx.hasher.Write(ctx.z) + _, _ = ctx.hasher.Write(ctx.info) + + hash := ctx.hasher.Sum(nil) + chunkCopied := copy(out[copied:], hash) + copied += chunkCopied + ctx.cache = hash[chunkCopied:] + + ctx.i++ + } + + return copied, nil +} diff --git a/vendor/github.com/go-jose/go-jose/v3/cipher/ecdh_es.go b/vendor/github.com/go-jose/go-jose/v3/cipher/ecdh_es.go new file mode 100644 index 00000000..093c6467 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/cipher/ecdh_es.go @@ -0,0 +1,86 @@ +/*- + * Copyright 2014 Square Inc. 
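A small, illustrative sketch of consuming the Concat KDF reader defined above. The input values are arbitrary placeholders; the algorithm ID carries the 32-bit big-endian length prefix the JWA Concat KDF expects:

    package main

    import (
        "crypto"
        _ "crypto/sha256" // registers SHA-256 so crypto.SHA256.New() works
        "encoding/binary"
        "fmt"

        josecipher "github.com/go-jose/go-jose/v3/cipher"
    )

    func main() {
        z := []byte("shared-secret-from-ecdh") // normally the ECDH Z value

        // Length-prefixed algorithm identifier ("A128GCM" is 7 bytes).
        algID := append([]byte{0, 0, 0, 7}, []byte("A128GCM")...)

        // suppPubInfo is the desired key size in bits, big-endian.
        supPubInfo := make([]byte, 4)
        binary.BigEndian.PutUint32(supPubInfo, 16*8)

        kdf := josecipher.NewConcatKDF(crypto.SHA256, z, algID, nil, nil, supPubInfo, nil)

        key := make([]byte, 16)
        if _, err := kdf.Read(key); err != nil {
            panic(err)
        }
        fmt.Printf("%x\n", key)
    }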
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "encoding/binary" +) + +// DeriveECDHES derives a shared encryption key using ECDH/ConcatKDF as described in JWE/JWA. +// It is an error to call this function with a private/public key that are not on the same +// curve. Callers must ensure that the keys are valid before calling this function. Output +// size may be at most 1<<16 bytes (64 KiB). +func DeriveECDHES(alg string, apuData, apvData []byte, priv *ecdsa.PrivateKey, pub *ecdsa.PublicKey, size int) []byte { + if size > 1<<16 { + panic("ECDH-ES output size too large, must be less than or equal to 1<<16") + } + + // algId, partyUInfo, partyVInfo inputs must be prefixed with the length + algID := lengthPrefixed([]byte(alg)) + ptyUInfo := lengthPrefixed(apuData) + ptyVInfo := lengthPrefixed(apvData) + + // suppPubInfo is the encoded length of the output size in bits + supPubInfo := make([]byte, 4) + binary.BigEndian.PutUint32(supPubInfo, uint32(size)*8) + + if !priv.PublicKey.Curve.IsOnCurve(pub.X, pub.Y) { + panic("public key not on same curve as private key") + } + + z, _ := priv.Curve.ScalarMult(pub.X, pub.Y, priv.D.Bytes()) + zBytes := z.Bytes() + + // Note that calling z.Bytes() on a big.Int may strip leading zero bytes from + // the returned byte array. This can lead to a problem where zBytes will be + // shorter than expected which breaks the key derivation. Therefore we must pad + // to the full length of the expected coordinate here before calling the KDF. + octSize := dSize(priv.Curve) + if len(zBytes) != octSize { + zBytes = append(bytes.Repeat([]byte{0}, octSize-len(zBytes)), zBytes...) + } + + reader := NewConcatKDF(crypto.SHA256, zBytes, algID, ptyUInfo, ptyVInfo, supPubInfo, []byte{}) + key := make([]byte, size) + + // Read on the KDF will never fail + _, _ = reader.Read(key) + + return key +} + +// dSize returns the size in octets for a coordinate on a elliptic curve. +func dSize(curve elliptic.Curve) int { + order := curve.Params().P + bitLen := order.BitLen() + size := bitLen / 8 + if bitLen%8 != 0 { + size++ + } + return size +} + +func lengthPrefixed(data []byte) []byte { + out := make([]byte, len(data)+4) + binary.BigEndian.PutUint32(out, uint32(len(data))) + copy(out[4:], data) + return out +} diff --git a/vendor/github.com/go-jose/go-jose/v3/cipher/key_wrap.go b/vendor/github.com/go-jose/go-jose/v3/cipher/key_wrap.go new file mode 100644 index 00000000..b9effbca --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/cipher/key_wrap.go @@ -0,0 +1,109 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
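A brief, illustrative sketch of calling DeriveECDHES (again using the josecipher alias). Both keys are generated on P-256, since the function panics if the public key is not on the private key's curve; for direct ECDH-ES the algorithm ID is the content encryption algorithm:

    package main

    import (
        "crypto/ecdsa"
        "crypto/elliptic"
        "crypto/rand"
        "fmt"

        josecipher "github.com/go-jose/go-jose/v3/cipher"
    )

    func main() {
        // Ephemeral sender key and static recipient key on the same curve.
        sender, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
        if err != nil {
            panic(err)
        }
        recipient, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
        if err != nil {
            panic(err)
        }

        // Derive 16 bytes of key material for "A128GCM".
        key := josecipher.DeriveECDHES("A128GCM", nil, nil, sender, &recipient.PublicKey, 16)
        fmt.Printf("%x\n", key)
    }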
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "crypto/cipher" + "crypto/subtle" + "encoding/binary" + "errors" +) + +var defaultIV = []byte{0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6} + +// KeyWrap implements NIST key wrapping; it wraps a content encryption key (cek) with the given block cipher. +func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) { + if len(cek)%8 != 0 { + return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks") + } + + n := len(cek) / 8 + r := make([][]byte, n) + + for i := range r { + r[i] = make([]byte, 8) + copy(r[i], cek[i*8:]) + } + + buffer := make([]byte, 16) + tBytes := make([]byte, 8) + copy(buffer, defaultIV) + + for t := 0; t < 6*n; t++ { + copy(buffer[8:], r[t%n]) + + block.Encrypt(buffer, buffer) + + binary.BigEndian.PutUint64(tBytes, uint64(t+1)) + + for i := 0; i < 8; i++ { + buffer[i] ^= tBytes[i] + } + copy(r[t%n], buffer[8:]) + } + + out := make([]byte, (n+1)*8) + copy(out, buffer[:8]) + for i := range r { + copy(out[(i+1)*8:], r[i]) + } + + return out, nil +} + +// KeyUnwrap implements NIST key unwrapping; it unwraps a content encryption key (cek) with the given block cipher. +func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) { + if len(ciphertext)%8 != 0 { + return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks") + } + + n := (len(ciphertext) / 8) - 1 + r := make([][]byte, n) + + for i := range r { + r[i] = make([]byte, 8) + copy(r[i], ciphertext[(i+1)*8:]) + } + + buffer := make([]byte, 16) + tBytes := make([]byte, 8) + copy(buffer[:8], ciphertext[:8]) + + for t := 6*n - 1; t >= 0; t-- { + binary.BigEndian.PutUint64(tBytes, uint64(t+1)) + + for i := 0; i < 8; i++ { + buffer[i] ^= tBytes[i] + } + copy(buffer[8:], r[t%n]) + + block.Decrypt(buffer, buffer) + + copy(r[t%n], buffer[8:]) + } + + if subtle.ConstantTimeCompare(buffer[:8], defaultIV) == 0 { + return nil, errors.New("go-jose/go-jose: failed to unwrap key") + } + + out := make([]byte, n*8) + for i := range r { + copy(out[i*8:], r[i]) + } + + return out, nil +} diff --git a/vendor/github.com/go-jose/go-jose/v3/crypter.go b/vendor/github.com/go-jose/go-jose/v3/crypter.go new file mode 100644 index 00000000..6901137e --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/crypter.go @@ -0,0 +1,544 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jose + +import ( + "crypto/ecdsa" + "crypto/rsa" + "errors" + "fmt" + "reflect" + + "github.com/go-jose/go-jose/v3/json" +) + +// Encrypter represents an encrypter which produces an encrypted JWE object. 
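+//
+// Editor's note: the sketch below is illustrative only and is not part of the
+// upstream go-jose sources. It assumes an *rsa.PublicKey held in a made-up
+// variable rsaPub, and it uses the package's A128GCM / RSA_OAEP constants and
+// the CompactSerialize method defined elsewhere in this package.
+//
+//	enc, err := NewEncrypter(A128GCM, Recipient{Algorithm: RSA_OAEP, Key: rsaPub}, nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	obj, err := enc.Encrypt([]byte("Lorem ipsum dolor sit amet"))
+//	if err != nil {
+//		// handle error
+//	}
+//	serialized, _ := obj.CompactSerialize()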
+type Encrypter interface { + Encrypt(plaintext []byte) (*JSONWebEncryption, error) + EncryptWithAuthData(plaintext []byte, aad []byte) (*JSONWebEncryption, error) + Options() EncrypterOptions +} + +// A generic content cipher +type contentCipher interface { + keySize() int + encrypt(cek []byte, aad, plaintext []byte) (*aeadParts, error) + decrypt(cek []byte, aad []byte, parts *aeadParts) ([]byte, error) +} + +// A key generator (for generating/getting a CEK) +type keyGenerator interface { + keySize() int + genKey() ([]byte, rawHeader, error) +} + +// A generic key encrypter +type keyEncrypter interface { + encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) // Encrypt a key +} + +// A generic key decrypter +type keyDecrypter interface { + decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) // Decrypt a key +} + +// A generic encrypter based on the given key encrypter and content cipher. +type genericEncrypter struct { + contentAlg ContentEncryption + compressionAlg CompressionAlgorithm + cipher contentCipher + recipients []recipientKeyInfo + keyGenerator keyGenerator + extraHeaders map[HeaderKey]interface{} +} + +type recipientKeyInfo struct { + keyID string + keyAlg KeyAlgorithm + keyEncrypter keyEncrypter +} + +// EncrypterOptions represents options that can be set on new encrypters. +type EncrypterOptions struct { + Compression CompressionAlgorithm + + // Optional map of additional keys to be inserted into the protected header + // of a JWS object. Some specifications which make use of JWS like to insert + // additional values here. All values must be JSON-serializable. + ExtraHeaders map[HeaderKey]interface{} +} + +// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it +// if necessary. It returns itself and so can be used in a fluent style. +func (eo *EncrypterOptions) WithHeader(k HeaderKey, v interface{}) *EncrypterOptions { + if eo.ExtraHeaders == nil { + eo.ExtraHeaders = map[HeaderKey]interface{}{} + } + eo.ExtraHeaders[k] = v + return eo +} + +// WithContentType adds a content type ("cty") header and returns the updated +// EncrypterOptions. +func (eo *EncrypterOptions) WithContentType(contentType ContentType) *EncrypterOptions { + return eo.WithHeader(HeaderContentType, contentType) +} + +// WithType adds a type ("typ") header and returns the updated EncrypterOptions. +func (eo *EncrypterOptions) WithType(typ ContentType) *EncrypterOptions { + return eo.WithHeader(HeaderType, typ) +} + +// Recipient represents an algorithm/key to encrypt messages to. +// +// PBES2Count and PBES2Salt correspond with the "p2c" and "p2s" headers used +// on the password-based encryption algorithms PBES2-HS256+A128KW, +// PBES2-HS384+A192KW, and PBES2-HS512+A256KW. If they are not provided a safe +// default of 100000 will be used for the count and a 128-bit random salt will +// be generated. 
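+//
+// Editor's note: illustrative only, not upstream code. A password-based
+// recipient using the PBES2_HS256_A128KW key algorithm could be configured
+// with an explicit iteration count while leaving PBES2Salt empty, so that a
+// random 128-bit salt is generated as described above; the passphrase and
+// count below are made up.
+//
+//	rcpt := Recipient{
+//		Algorithm:  PBES2_HS256_A128KW,
+//		Key:        "a long passphrase",
+//		PBES2Count: 600000,
+//	}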
+type Recipient struct { + Algorithm KeyAlgorithm + Key interface{} + KeyID string + PBES2Count int + PBES2Salt []byte +} + +// NewEncrypter creates an appropriate encrypter based on the key type +func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions) (Encrypter, error) { + encrypter := &genericEncrypter{ + contentAlg: enc, + recipients: []recipientKeyInfo{}, + cipher: getContentCipher(enc), + } + if opts != nil { + encrypter.compressionAlg = opts.Compression + encrypter.extraHeaders = opts.ExtraHeaders + } + + if encrypter.cipher == nil { + return nil, ErrUnsupportedAlgorithm + } + + var keyID string + var rawKey interface{} + switch encryptionKey := rcpt.Key.(type) { + case JSONWebKey: + keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key + case *JSONWebKey: + keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key + case OpaqueKeyEncrypter: + keyID, rawKey = encryptionKey.KeyID(), encryptionKey + default: + rawKey = encryptionKey + } + + switch rcpt.Algorithm { + case DIRECT: + // Direct encryption mode must be treated differently + if reflect.TypeOf(rawKey) != reflect.TypeOf([]byte{}) { + return nil, ErrUnsupportedKeyType + } + if encrypter.cipher.keySize() != len(rawKey.([]byte)) { + return nil, ErrInvalidKeySize + } + encrypter.keyGenerator = staticKeyGenerator{ + key: rawKey.([]byte), + } + recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, rawKey.([]byte)) + recipientInfo.keyID = keyID + if rcpt.KeyID != "" { + recipientInfo.keyID = rcpt.KeyID + } + encrypter.recipients = []recipientKeyInfo{recipientInfo} + return encrypter, nil + case ECDH_ES: + // ECDH-ES (w/o key wrapping) is similar to DIRECT mode + typeOf := reflect.TypeOf(rawKey) + if typeOf != reflect.TypeOf(&ecdsa.PublicKey{}) { + return nil, ErrUnsupportedKeyType + } + encrypter.keyGenerator = ecKeyGenerator{ + size: encrypter.cipher.keySize(), + algID: string(enc), + publicKey: rawKey.(*ecdsa.PublicKey), + } + recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, rawKey.(*ecdsa.PublicKey)) + recipientInfo.keyID = keyID + if rcpt.KeyID != "" { + recipientInfo.keyID = rcpt.KeyID + } + encrypter.recipients = []recipientKeyInfo{recipientInfo} + return encrypter, nil + default: + // Can just add a standard recipient + encrypter.keyGenerator = randomKeyGenerator{ + size: encrypter.cipher.keySize(), + } + err := encrypter.addRecipient(rcpt) + return encrypter, err + } +} + +// NewMultiEncrypter creates a multi-encrypter based on the given parameters +func NewMultiEncrypter(enc ContentEncryption, rcpts []Recipient, opts *EncrypterOptions) (Encrypter, error) { + cipher := getContentCipher(enc) + + if cipher == nil { + return nil, ErrUnsupportedAlgorithm + } + if len(rcpts) == 0 { + return nil, fmt.Errorf("go-jose/go-jose: recipients is nil or empty") + } + + encrypter := &genericEncrypter{ + contentAlg: enc, + recipients: []recipientKeyInfo{}, + cipher: cipher, + keyGenerator: randomKeyGenerator{ + size: cipher.keySize(), + }, + } + + if opts != nil { + encrypter.compressionAlg = opts.Compression + encrypter.extraHeaders = opts.ExtraHeaders + } + + for _, recipient := range rcpts { + err := encrypter.addRecipient(recipient) + if err != nil { + return nil, err + } + } + + return encrypter, nil +} + +func (ctx *genericEncrypter) addRecipient(recipient Recipient) (err error) { + var recipientInfo recipientKeyInfo + + switch recipient.Algorithm { + case DIRECT, ECDH_ES: + return fmt.Errorf("go-jose/go-jose: key algorithm '%s' not supported in multi-recipient mode", recipient.Algorithm) + } + + 
recipientInfo, err = makeJWERecipient(recipient.Algorithm, recipient.Key) + if recipient.KeyID != "" { + recipientInfo.keyID = recipient.KeyID + } + + switch recipient.Algorithm { + case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW: + if sr, ok := recipientInfo.keyEncrypter.(*symmetricKeyCipher); ok { + sr.p2c = recipient.PBES2Count + sr.p2s = recipient.PBES2Salt + } + } + + if err == nil { + ctx.recipients = append(ctx.recipients, recipientInfo) + } + return err +} + +func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKeyInfo, error) { + switch encryptionKey := encryptionKey.(type) { + case *rsa.PublicKey: + return newRSARecipient(alg, encryptionKey) + case *ecdsa.PublicKey: + return newECDHRecipient(alg, encryptionKey) + case []byte: + return newSymmetricRecipient(alg, encryptionKey) + case string: + return newSymmetricRecipient(alg, []byte(encryptionKey)) + case *JSONWebKey: + recipient, err := makeJWERecipient(alg, encryptionKey.Key) + recipient.keyID = encryptionKey.KeyID + return recipient, err + } + if encrypter, ok := encryptionKey.(OpaqueKeyEncrypter); ok { + return newOpaqueKeyEncrypter(alg, encrypter) + } + return recipientKeyInfo{}, ErrUnsupportedKeyType +} + +// newDecrypter creates an appropriate decrypter based on the key type +func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) { + switch decryptionKey := decryptionKey.(type) { + case *rsa.PrivateKey: + return &rsaDecrypterSigner{ + privateKey: decryptionKey, + }, nil + case *ecdsa.PrivateKey: + return &ecDecrypterSigner{ + privateKey: decryptionKey, + }, nil + case []byte: + return &symmetricKeyCipher{ + key: decryptionKey, + }, nil + case string: + return &symmetricKeyCipher{ + key: []byte(decryptionKey), + }, nil + case JSONWebKey: + return newDecrypter(decryptionKey.Key) + case *JSONWebKey: + return newDecrypter(decryptionKey.Key) + } + if okd, ok := decryptionKey.(OpaqueKeyDecrypter); ok { + return &opaqueKeyDecrypter{decrypter: okd}, nil + } + return nil, ErrUnsupportedKeyType +} + +// Implementation of encrypt method producing a JWE object. +func (ctx *genericEncrypter) Encrypt(plaintext []byte) (*JSONWebEncryption, error) { + return ctx.EncryptWithAuthData(plaintext, nil) +} + +// Implementation of encrypt method producing a JWE object. +func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JSONWebEncryption, error) { + obj := &JSONWebEncryption{} + obj.aad = aad + + obj.protected = &rawHeader{} + err := obj.protected.set(headerEncryption, ctx.contentAlg) + if err != nil { + return nil, err + } + + obj.recipients = make([]recipientInfo, len(ctx.recipients)) + + if len(ctx.recipients) == 0 { + return nil, fmt.Errorf("go-jose/go-jose: no recipients to encrypt to") + } + + cek, headers, err := ctx.keyGenerator.genKey() + if err != nil { + return nil, err + } + + obj.protected.merge(&headers) + + for i, info := range ctx.recipients { + recipient, err := info.keyEncrypter.encryptKey(cek, info.keyAlg) + if err != nil { + return nil, err + } + + err = recipient.header.set(headerAlgorithm, info.keyAlg) + if err != nil { + return nil, err + } + + if info.keyID != "" { + err = recipient.header.set(headerKeyID, info.keyID) + if err != nil { + return nil, err + } + } + obj.recipients[i] = recipient + } + + if len(ctx.recipients) == 1 { + // Move per-recipient headers into main protected header if there's + // only a single recipient. 
+ obj.protected.merge(obj.recipients[0].header) + obj.recipients[0].header = nil + } + + if ctx.compressionAlg != NONE { + plaintext, err = compress(ctx.compressionAlg, plaintext) + if err != nil { + return nil, err + } + + err = obj.protected.set(headerCompression, ctx.compressionAlg) + if err != nil { + return nil, err + } + } + + for k, v := range ctx.extraHeaders { + b, err := json.Marshal(v) + if err != nil { + return nil, err + } + (*obj.protected)[k] = makeRawMessage(b) + } + + authData := obj.computeAuthData() + parts, err := ctx.cipher.encrypt(cek, authData, plaintext) + if err != nil { + return nil, err + } + + obj.iv = parts.iv + obj.ciphertext = parts.ciphertext + obj.tag = parts.tag + + return obj, nil +} + +func (ctx *genericEncrypter) Options() EncrypterOptions { + return EncrypterOptions{ + Compression: ctx.compressionAlg, + ExtraHeaders: ctx.extraHeaders, + } +} + +// Decrypt and validate the object and return the plaintext. Note that this +// function does not support multi-recipient, if you desire multi-recipient +// decryption use DecryptMulti instead. +func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) { + headers := obj.mergedHeaders(nil) + + if len(obj.recipients) > 1 { + return nil, errors.New("go-jose/go-jose: too many recipients in payload; expecting only one") + } + + critical, err := headers.getCritical() + if err != nil { + return nil, fmt.Errorf("go-jose/go-jose: invalid crit header") + } + + if len(critical) > 0 { + return nil, fmt.Errorf("go-jose/go-jose: unsupported crit header") + } + + key := tryJWKS(decryptionKey, obj.Header) + decrypter, err := newDecrypter(key) + if err != nil { + return nil, err + } + + cipher := getContentCipher(headers.getEncryption()) + if cipher == nil { + return nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(headers.getEncryption())) + } + + generator := randomKeyGenerator{ + size: cipher.keySize(), + } + + parts := &aeadParts{ + iv: obj.iv, + ciphertext: obj.ciphertext, + tag: obj.tag, + } + + authData := obj.computeAuthData() + + var plaintext []byte + recipient := obj.recipients[0] + recipientHeaders := obj.mergedHeaders(&recipient) + + cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator) + if err == nil { + // Found a valid CEK -- let's try to decrypt. + plaintext, err = cipher.decrypt(cek, authData, parts) + } + + if plaintext == nil { + return nil, ErrCryptoFailure + } + + // The "zip" header parameter may only be present in the protected header. + if comp := obj.protected.getCompression(); comp != "" { + plaintext, err = decompress(comp, plaintext) + } + + return plaintext, err +} + +// DecryptMulti decrypts and validates the object and returns the plaintexts, +// with support for multiple recipients. It returns the index of the recipient +// for which the decryption was successful, the merged headers for that recipient, +// and the plaintext. 
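+//
+// Editor's note: illustrative only, not upstream code. A caller holding a
+// single private key (privateKey is a made-up variable) typically inspects
+// the returned index and headers to see which recipient entry matched:
+//
+//	idx, hdr, plaintext, err := obj.DecryptMulti(privateKey)
+//	if err != nil {
+//		// e.g. ErrCryptoFailure when no recipient could be decrypted
+//	}
+//	_ = idx       // position of the matching recipient
+//	_ = hdr       // sanitized headers for that recipient
+//	_ = plaintext // decrypted payload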
+func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) { + globalHeaders := obj.mergedHeaders(nil) + + critical, err := globalHeaders.getCritical() + if err != nil { + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: invalid crit header") + } + + if len(critical) > 0 { + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported crit header") + } + + key := tryJWKS(decryptionKey, obj.Header) + decrypter, err := newDecrypter(key) + if err != nil { + return -1, Header{}, nil, err + } + + encryption := globalHeaders.getEncryption() + cipher := getContentCipher(encryption) + if cipher == nil { + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(encryption)) + } + + generator := randomKeyGenerator{ + size: cipher.keySize(), + } + + parts := &aeadParts{ + iv: obj.iv, + ciphertext: obj.ciphertext, + tag: obj.tag, + } + + authData := obj.computeAuthData() + + index := -1 + var plaintext []byte + var headers rawHeader + + for i, recipient := range obj.recipients { + recipientHeaders := obj.mergedHeaders(&recipient) + + cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator) + if err == nil { + // Found a valid CEK -- let's try to decrypt. + plaintext, err = cipher.decrypt(cek, authData, parts) + if err == nil { + index = i + headers = recipientHeaders + break + } + } + } + + if plaintext == nil { + return -1, Header{}, nil, ErrCryptoFailure + } + + // The "zip" header parameter may only be present in the protected header. + if comp := obj.protected.getCompression(); comp != "" { + plaintext, _ = decompress(comp, plaintext) + } + + sanitized, err := headers.sanitized() + if err != nil { + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: failed to sanitize header: %v", err) + } + + return index, sanitized, plaintext, err +} diff --git a/vendor/github.com/go-jose/go-jose/v3/doc.go b/vendor/github.com/go-jose/go-jose/v3/doc.go new file mode 100644 index 00000000..71ec1c41 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/doc.go @@ -0,0 +1,27 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + +Package jose aims to provide an implementation of the Javascript Object Signing +and Encryption set of standards. It implements encryption and signing based on +the JSON Web Encryption and JSON Web Signature standards, with optional JSON Web +Token support available in a sub-package. The library supports both the compact +and JWS/JWE JSON Serialization formats, and has optional support for multiple +recipients. + +*/ +package jose diff --git a/vendor/github.com/go-jose/go-jose/v3/encoding.go b/vendor/github.com/go-jose/go-jose/v3/encoding.go new file mode 100644 index 00000000..968a4249 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/encoding.go @@ -0,0 +1,191 @@ +/*- + * Copyright 2014 Square Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jose + +import ( + "bytes" + "compress/flate" + "encoding/base64" + "encoding/binary" + "io" + "math/big" + "strings" + "unicode" + + "github.com/go-jose/go-jose/v3/json" +) + +// Helper function to serialize known-good objects. +// Precondition: value is not a nil pointer. +func mustSerializeJSON(value interface{}) []byte { + out, err := json.Marshal(value) + if err != nil { + panic(err) + } + // We never want to serialize the top-level value "null," since it's not a + // valid JOSE message. But if a caller passes in a nil pointer to this method, + // MarshalJSON will happily serialize it as the top-level value "null". If + // that value is then embedded in another operation, for instance by being + // base64-encoded and fed as input to a signing algorithm + // (https://github.com/go-jose/go-jose/issues/22), the result will be + // incorrect. Because this method is intended for known-good objects, and a nil + // pointer is not a known-good object, we are free to panic in this case. + // Note: It's not possible to directly check whether the data pointed at by an + // interface is a nil pointer, so we do this hacky workaround. + // https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I + if string(out) == "null" { + panic("Tried to serialize a nil pointer.") + } + return out +} + +// Strip all newlines and whitespace +func stripWhitespace(data string) string { + buf := strings.Builder{} + buf.Grow(len(data)) + for _, r := range data { + if !unicode.IsSpace(r) { + buf.WriteRune(r) + } + } + return buf.String() +} + +// Perform compression based on algorithm +func compress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { + switch algorithm { + case DEFLATE: + return deflate(input) + default: + return nil, ErrUnsupportedAlgorithm + } +} + +// Perform decompression based on algorithm +func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { + switch algorithm { + case DEFLATE: + return inflate(input) + default: + return nil, ErrUnsupportedAlgorithm + } +} + +// Compress with DEFLATE +func deflate(input []byte) ([]byte, error) { + output := new(bytes.Buffer) + + // Writing to byte buffer, err is always nil + writer, _ := flate.NewWriter(output, 1) + _, _ = io.Copy(writer, bytes.NewBuffer(input)) + + err := writer.Close() + return output.Bytes(), err +} + +// Decompress with DEFLATE +func inflate(input []byte) ([]byte, error) { + output := new(bytes.Buffer) + reader := flate.NewReader(bytes.NewBuffer(input)) + + _, err := io.Copy(output, reader) + if err != nil { + return nil, err + } + + err = reader.Close() + return output.Bytes(), err +} + +// byteBuffer represents a slice of bytes that can be serialized to url-safe base64. 
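+//
+// Editor's note: illustrative only, not upstream code. "url-safe base64"
+// refers to the unpadded base64url alphabet used throughout JOSE (RFC 7515),
+// i.e. encoding/base64's RawURLEncoding:
+//
+//	base64.RawURLEncoding.EncodeToString([]byte{0xfb, 0xff}) // "-_8"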
+type byteBuffer struct { + data []byte +} + +func newBuffer(data []byte) *byteBuffer { + if data == nil { + return nil + } + return &byteBuffer{ + data: data, + } +} + +func newFixedSizeBuffer(data []byte, length int) *byteBuffer { + if len(data) > length { + panic("go-jose/go-jose: invalid call to newFixedSizeBuffer (len(data) > length)") + } + pad := make([]byte, length-len(data)) + return newBuffer(append(pad, data...)) +} + +func newBufferFromInt(num uint64) *byteBuffer { + data := make([]byte, 8) + binary.BigEndian.PutUint64(data, num) + return newBuffer(bytes.TrimLeft(data, "\x00")) +} + +func (b *byteBuffer) MarshalJSON() ([]byte, error) { + return json.Marshal(b.base64()) +} + +func (b *byteBuffer) UnmarshalJSON(data []byte) error { + var encoded string + err := json.Unmarshal(data, &encoded) + if err != nil { + return err + } + + if encoded == "" { + return nil + } + + decoded, err := base64URLDecode(encoded) + if err != nil { + return err + } + + *b = *newBuffer(decoded) + + return nil +} + +func (b *byteBuffer) base64() string { + return base64.RawURLEncoding.EncodeToString(b.data) +} + +func (b *byteBuffer) bytes() []byte { + // Handling nil here allows us to transparently handle nil slices when serializing. + if b == nil { + return nil + } + return b.data +} + +func (b byteBuffer) bigInt() *big.Int { + return new(big.Int).SetBytes(b.data) +} + +func (b byteBuffer) toInt() int { + return int(b.bigInt().Int64()) +} + +// base64URLDecode is implemented as defined in https://www.rfc-editor.org/rfc/rfc7515.html#appendix-C +func base64URLDecode(value string) ([]byte, error) { + value = strings.TrimRight(value, "=") + return base64.RawURLEncoding.DecodeString(value) +} diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/go-jose/go-jose/v3/json/LICENSE similarity index 95% rename from vendor/github.com/golang/snappy/LICENSE rename to vendor/github.com/go-jose/go-jose/v3/json/LICENSE index 6050c10f..74487567 100644 --- a/vendor/github.com/golang/snappy/LICENSE +++ b/vendor/github.com/go-jose/go-jose/v3/json/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/go-jose/go-jose/v3/json/README.md b/vendor/github.com/go-jose/go-jose/v3/json/README.md new file mode 100644 index 00000000..86de5e55 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/json/README.md @@ -0,0 +1,13 @@ +# Safe JSON + +This repository contains a fork of the `encoding/json` package from Go 1.6. + +The following changes were made: + +* Object deserialization uses case-sensitive member name matching instead of + [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html). + This is to avoid differences in the interpretation of JOSE messages between + go-jose and libraries written in other languages. +* When deserializing a JSON object, we check for duplicate keys and reject the + input whenever we detect a duplicate. Rather than trying to work with malformed + data, we prefer to reject it right away. diff --git a/vendor/github.com/go-jose/go-jose/v3/json/decode.go b/vendor/github.com/go-jose/go-jose/v3/json/decode.go new file mode 100644 index 00000000..4dbc4146 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/json/decode.go @@ -0,0 +1,1217 @@ +// Copyright 2010 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Represents JSON data structure using native Go types: booleans, floats, +// strings, arrays, and maps. + +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "errors" + "fmt" + "math" + "reflect" + "runtime" + "strconv" + "unicode" + "unicode/utf16" + "unicode/utf8" +) + +// Unmarshal parses the JSON-encoded data and stores the result +// in the value pointed to by v. +// +// Unmarshal uses the inverse of the encodings that +// Marshal uses, allocating maps, slices, and pointers as necessary, +// with the following additional rules: +// +// To unmarshal JSON into a pointer, Unmarshal first handles the case of +// the JSON being the JSON literal null. In that case, Unmarshal sets +// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into +// the value pointed at by the pointer. If the pointer is nil, Unmarshal +// allocates a new value for it to point to. +// +// To unmarshal JSON into a struct, Unmarshal matches incoming object +// keys to the keys used by Marshal (either the struct field name or its tag), +// preferring an exact match but also accepting a case-insensitive match. +// Unmarshal will only set exported fields of the struct. +// +// To unmarshal JSON into an interface value, +// Unmarshal stores one of these in the interface value: +// +// bool, for JSON booleans +// float64, for JSON numbers +// string, for JSON strings +// []interface{}, for JSON arrays +// map[string]interface{}, for JSON objects +// nil for JSON null +// +// To unmarshal a JSON array into a slice, Unmarshal resets the slice length +// to zero and then appends each element to the slice. +// As a special case, to unmarshal an empty JSON array into a slice, +// Unmarshal replaces the slice with a new empty slice. +// +// To unmarshal a JSON array into a Go array, Unmarshal decodes +// JSON array elements into corresponding Go array elements. +// If the Go array is smaller than the JSON array, +// the additional JSON array elements are discarded. +// If the JSON array is smaller than the Go array, +// the additional Go array elements are set to zero values. +// +// To unmarshal a JSON object into a string-keyed map, Unmarshal first +// establishes a map to use, If the map is nil, Unmarshal allocates a new map. +// Otherwise Unmarshal reuses the existing map, keeping existing entries. +// Unmarshal then stores key-value pairs from the JSON object into the map. +// +// If a JSON value is not appropriate for a given target type, +// or if a JSON number overflows the target type, Unmarshal +// skips that field and completes the unmarshaling as best it can. +// If no more serious errors are encountered, Unmarshal returns +// an UnmarshalTypeError describing the earliest such error. +// +// The JSON null value unmarshals into an interface, map, pointer, or slice +// by setting that Go value to nil. Because null is often used in JSON to mean +// ``not present,'' unmarshaling a JSON null into any other Go type has no effect +// on the value and produces no error. +// +// When unmarshaling quoted strings, invalid UTF-8 or +// invalid UTF-16 surrogate pairs are not treated as an error. +// Instead, they are replaced by the Unicode replacement +// character U+FFFD. +// +func Unmarshal(data []byte, v interface{}) error { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. 
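+	//
+	// Editor's note: illustrative only, not upstream code. Usage mirrors
+	// encoding/json (claims is a made-up variable):
+	//
+	//	var claims map[string]interface{}
+	//	err := Unmarshal([]byte(`{"iss":"example"}`), &claims)
+	//
+	// Unlike encoding/json, this fork rejects duplicate object keys and
+	// matches struct field names case-sensitively (see the package README).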
+ var d decodeState + err := checkValid(data, &d.scan) + if err != nil { + return err + } + + d.init(data) + return d.unmarshal(v) +} + +// Unmarshaler is the interface implemented by objects +// that can unmarshal a JSON description of themselves. +// The input can be assumed to be a valid encoding of +// a JSON value. UnmarshalJSON must copy the JSON data +// if it wishes to retain the data after returning. +type Unmarshaler interface { + UnmarshalJSON([]byte) error +} + +// An UnmarshalTypeError describes a JSON value that was +// not appropriate for a value of a specific Go type. +type UnmarshalTypeError struct { + Value string // description of JSON value - "bool", "array", "number -5" + Type reflect.Type // type of Go value it could not be assigned to + Offset int64 // error occurred after reading Offset bytes +} + +func (e *UnmarshalTypeError) Error() string { + return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() +} + +// An UnmarshalFieldError describes a JSON object key that +// led to an unexported (and therefore unwritable) struct field. +// (No longer used; kept for compatibility.) +type UnmarshalFieldError struct { + Key string + Type reflect.Type + Field reflect.StructField +} + +func (e *UnmarshalFieldError) Error() string { + return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() +} + +// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. +// (The argument to Unmarshal must be a non-nil pointer.) +type InvalidUnmarshalError struct { + Type reflect.Type +} + +func (e *InvalidUnmarshalError) Error() string { + if e.Type == nil { + return "json: Unmarshal(nil)" + } + + if e.Type.Kind() != reflect.Ptr { + return "json: Unmarshal(non-pointer " + e.Type.String() + ")" + } + return "json: Unmarshal(nil " + e.Type.String() + ")" +} + +func (d *decodeState) unmarshal(v interface{}) (err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + err = r.(error) + } + }() + + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + return &InvalidUnmarshalError{reflect.TypeOf(v)} + } + + d.scan.reset() + // We decode rv not rv.Elem because the Unmarshaler interface + // test must be applied at the top level of the value. + d.value(rv) + return d.savedError +} + +// A Number represents a JSON number literal. +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +// isValidNumber reports whether s is a valid JSON number literal. +func isValidNumber(s string) bool { + // This function implements the JSON numbers grammar. + // See https://tools.ietf.org/html/rfc7159#section-6 + // and http://json.org/number.gif + + if s == "" { + return false + } + + // Optional - + if s[0] == '-' { + s = s[1:] + if s == "" { + return false + } + } + + // Digits + switch { + default: + return false + + case s[0] == '0': + s = s[1:] + + case '1' <= s[0] && s[0] <= '9': + s = s[1:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // . followed by 1 or more digits. + if len(s) >= 2 && s[0] == '.' 
&& '0' <= s[1] && s[1] <= '9' { + s = s[2:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // e or E followed by an optional - or + and + // 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + s = s[1:] + if s[0] == '+' || s[0] == '-' { + s = s[1:] + if s == "" { + return false + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // Make sure we are at the end. + return s == "" +} + +type NumberUnmarshalType int + +const ( + // unmarshal a JSON number into an interface{} as a float64 + UnmarshalFloat NumberUnmarshalType = iota + // unmarshal a JSON number into an interface{} as a `json.Number` + UnmarshalJSONNumber + // unmarshal a JSON number into an interface{} as a int64 + // if value is an integer otherwise float64 + UnmarshalIntOrFloat +) + +// decodeState represents the state while decoding a JSON value. +type decodeState struct { + data []byte + off int // read offset in data + scan scanner + nextscan scanner // for calls to nextValue + savedError error + numberType NumberUnmarshalType +} + +// errPhase is used for errors that should not happen unless +// there is a bug in the JSON decoder or something is editing +// the data slice while the decoder executes. +var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?") + +func (d *decodeState) init(data []byte) *decodeState { + d.data = data + d.off = 0 + d.savedError = nil + return d +} + +// error aborts the decoding by panicking with err. +func (d *decodeState) error(err error) { + panic(err) +} + +// saveError saves the first err it is called with, +// for reporting at the end of the unmarshal. +func (d *decodeState) saveError(err error) { + if d.savedError == nil { + d.savedError = err + } +} + +// next cuts off and returns the next full JSON value in d.data[d.off:]. +// The next value is known to be an object or array, not a literal. +func (d *decodeState) next() []byte { + c := d.data[d.off] + item, rest, err := nextValue(d.data[d.off:], &d.nextscan) + if err != nil { + d.error(err) + } + d.off = len(d.data) - len(rest) + + // Our scanner has seen the opening brace/bracket + // and thinks we're still in the middle of the object. + // invent a closing brace/bracket to get it out. + if c == '{' { + d.scan.step(&d.scan, '}') + } else { + d.scan.step(&d.scan, ']') + } + + return item +} + +// scanWhile processes bytes in d.data[d.off:] until it +// receives a scan code not equal to op. +// It updates d.off and returns the new scan code. +func (d *decodeState) scanWhile(op int) int { + var newOp int + for { + if d.off >= len(d.data) { + newOp = d.scan.eof() + d.off = len(d.data) + 1 // mark processed EOF with len+1 + } else { + c := d.data[d.off] + d.off++ + newOp = d.scan.step(&d.scan, c) + } + if newOp != op { + break + } + } + return newOp +} + +// value decodes a JSON value from d.data[d.off:] into the value. +// it updates d.off to point past the decoded value. +func (d *decodeState) value(v reflect.Value) { + if !v.IsValid() { + _, rest, err := nextValue(d.data[d.off:], &d.nextscan) + if err != nil { + d.error(err) + } + d.off = len(d.data) - len(rest) + + // d.scan thinks we're still at the beginning of the item. + // Feed in an empty string - the shortest, simplest value - + // so that it knows we got to the end of the value. + if d.scan.redo { + // rewind. 
+ d.scan.redo = false + d.scan.step = stateBeginValue + } + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '"') + + n := len(d.scan.parseState) + if n > 0 && d.scan.parseState[n-1] == parseObjectKey { + // d.scan thinks we just read an object key; finish the object + d.scan.step(&d.scan, ':') + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '}') + } + + return + } + + switch op := d.scanWhile(scanSkipSpace); op { + default: + d.error(errPhase) + + case scanBeginArray: + d.array(v) + + case scanBeginObject: + d.object(v) + + case scanBeginLiteral: + d.literal(v) + } +} + +type unquotedValue struct{} + +// valueQuoted is like value but decodes a +// quoted string literal or literal null into an interface value. +// If it finds anything other than a quoted string literal or null, +// valueQuoted returns unquotedValue{}. +func (d *decodeState) valueQuoted() interface{} { + switch op := d.scanWhile(scanSkipSpace); op { + default: + d.error(errPhase) + + case scanBeginArray: + d.array(reflect.Value{}) + + case scanBeginObject: + d.object(reflect.Value{}) + + case scanBeginLiteral: + switch v := d.literalInterface().(type) { + case nil, string: + return v + } + } + return unquotedValue{} +} + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// if it encounters an Unmarshaler, indirect stops and returns that. +// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. +func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. + if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + break + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + if v.Type().NumMethod() > 0 { + if u, ok := v.Interface().(Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + v = v.Elem() + } + return nil, nil, v +} + +// array consumes an array from d.data[d.off-1:], decoding into the value v. +// the first byte of the array ('[') has been read already. +func (d *decodeState) array(v reflect.Value) { + // Check for unmarshaler. + u, ut, pv := d.indirect(v, false) + if u != nil { + d.off-- + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) + d.off-- + d.next() + return + } + + v = pv + + // Check type of target. + switch v.Kind() { + case reflect.Interface: + if v.NumMethod() == 0 { + // Decoding into nil interface? Switch to non-reflect code. + v.Set(reflect.ValueOf(d.arrayInterface())) + return + } + // Otherwise it's invalid. 
+ fallthrough + default: + d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) + d.off-- + d.next() + return + case reflect.Array: + case reflect.Slice: + break + } + + i := 0 + for { + // Look ahead for ] - can only happen on first iteration. + op := d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + + // Back up so d.value can have the byte we just read. + d.off-- + d.scan.undo(op) + + // Get element of array, growing if necessary. + if v.Kind() == reflect.Slice { + // Grow slice if necessary + if i >= v.Cap() { + newcap := v.Cap() + v.Cap()/2 + if newcap < 4 { + newcap = 4 + } + newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) + reflect.Copy(newv, v) + v.Set(newv) + } + if i >= v.Len() { + v.SetLen(i + 1) + } + } + + if i < v.Len() { + // Decode into element. + d.value(v.Index(i)) + } else { + // Ran out of fixed array: skip. + d.value(reflect.Value{}) + } + i++ + + // Next token must be , or ]. + op = d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + if op != scanArrayValue { + d.error(errPhase) + } + } + + if i < v.Len() { + if v.Kind() == reflect.Array { + // Array. Zero the rest. + z := reflect.Zero(v.Type().Elem()) + for ; i < v.Len(); i++ { + v.Index(i).Set(z) + } + } else { + v.SetLen(i) + } + } + if i == 0 && v.Kind() == reflect.Slice { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + } +} + +var nullLiteral = []byte("null") + +// object consumes an object from d.data[d.off-1:], decoding into the value v. +// the first byte ('{') of the object has been read already. +func (d *decodeState) object(v reflect.Value) { + // Check for unmarshaler. + u, ut, pv := d.indirect(v, false) + if u != nil { + d.off-- + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + v = pv + + // Decoding into nil interface? Switch to non-reflect code. + if v.Kind() == reflect.Interface && v.NumMethod() == 0 { + v.Set(reflect.ValueOf(d.objectInterface())) + return + } + + // Check type of target: struct or map[string]T + switch v.Kind() { + case reflect.Map: + // map must have string kind + t := v.Type() + if t.Key().Kind() != reflect.String { + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + if v.IsNil() { + v.Set(reflect.MakeMap(t)) + } + case reflect.Struct: + + default: + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + + var mapElem reflect.Value + keys := map[string]bool{} + + for { + // Read opening " of string key or closing }. + op := d.scanWhile(scanSkipSpace) + if op == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if op != scanBeginLiteral { + d.error(errPhase) + } + + // Read key. + start := d.off - 1 + op = d.scanWhile(scanContinue) + item := d.data[start : d.off-1] + key, ok := unquote(item) + if !ok { + d.error(errPhase) + } + + // Check for duplicate keys. + _, ok = keys[key] + if !ok { + keys[key] = true + } else { + d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) + } + + // Figure out field corresponding to key. 
+ var subv reflect.Value + destring := false // whether the value is wrapped in a string to be decoded first + + if v.Kind() == reflect.Map { + elemType := v.Type().Elem() + if !mapElem.IsValid() { + mapElem = reflect.New(elemType).Elem() + } else { + mapElem.Set(reflect.Zero(elemType)) + } + subv = mapElem + } else { + var f *field + fields := cachedTypeFields(v.Type()) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, []byte(key)) { + f = ff + break + } + } + if f != nil { + subv = v + destring = f.quoted + for _, i := range f.index { + if subv.Kind() == reflect.Ptr { + if subv.IsNil() { + subv.Set(reflect.New(subv.Type().Elem())) + } + subv = subv.Elem() + } + subv = subv.Field(i) + } + } + } + + // Read : before value. + if op == scanSkipSpace { + op = d.scanWhile(scanSkipSpace) + } + if op != scanObjectKey { + d.error(errPhase) + } + + // Read value. + if destring { + switch qv := d.valueQuoted().(type) { + case nil: + d.literalStore(nullLiteral, subv, false) + case string: + d.literalStore([]byte(qv), subv, true) + default: + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) + } + } else { + d.value(subv) + } + + // Write value back to map; + // if using struct, subv points into struct already. + if v.Kind() == reflect.Map { + kv := reflect.ValueOf(key).Convert(v.Type().Key()) + v.SetMapIndex(kv, subv) + } + + // Next token must be , or }. + op = d.scanWhile(scanSkipSpace) + if op == scanEndObject { + break + } + if op != scanObjectValue { + d.error(errPhase) + } + } +} + +// literal consumes a literal from d.data[d.off-1:], decoding into the value v. +// The first byte of the literal has been read already +// (that's how the caller knows it's a literal). +func (d *decodeState) literal(v reflect.Value) { + // All bytes inside literal return scanContinue op code. + start := d.off - 1 + op := d.scanWhile(scanContinue) + + // Scan read one byte too far; back up. + d.off-- + d.scan.undo(op) + + d.literalStore(d.data[start:d.off], v, false) +} + +// convertNumber converts the number literal s to a float64, int64 or a Number +// depending on d.numberDecodeType. +func (d *decodeState) convertNumber(s string) (interface{}, error) { + switch d.numberType { + + case UnmarshalJSONNumber: + return Number(s), nil + case UnmarshalIntOrFloat: + v, err := strconv.ParseInt(s, 10, 64) + if err == nil { + return v, nil + } + + // tries to parse integer number in scientific notation + f, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} + } + + // if it has no decimal value use int64 + if fi, fd := math.Modf(f); fd == 0.0 { + return int64(fi), nil + } + return f, nil + default: + f, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} + } + return f, nil + } + +} + +var numberType = reflect.TypeOf(Number("")) + +// literalStore decodes a literal stored in item into v. +// +// fromQuoted indicates whether this literal came from unwrapping a +// string from the ",string" struct tag option. this is used only to +// produce more helpful error messages. +func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) { + // Check for unmarshaler. 
+ if len(item) == 0 { + //Empty string given + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + return + } + wantptr := item[0] == 'n' // null + u, ut, pv := d.indirect(v, wantptr) + if u != nil { + err := u.UnmarshalJSON(item) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + if item[0] != '"' { + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + } + return + } + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + err := ut.UnmarshalText(s) + if err != nil { + d.error(err) + } + return + } + + v = pv + + switch c := item[0]; c { + case 'n': // null + switch v.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + v.Set(reflect.Zero(v.Type())) + // otherwise, ignore null for primitives/string + } + case 't', 'f': // true, false + value := c == 't' + switch v.Kind() { + default: + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) + } + case reflect.Bool: + v.SetBool(value) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(value)) + } else { + d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) + } + } + + case '"': // string + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + switch v.Kind() { + default: + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + case reflect.Slice: + if v.Type().Elem().Kind() != reflect.Uint8 { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + break + } + b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) + n, err := base64.StdEncoding.Decode(b, s) + if err != nil { + d.saveError(err) + break + } + v.SetBytes(b[:n]) + case reflect.String: + v.SetString(string(s)) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(string(s))) + } else { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + } + } + + default: // number + if c != '-' && (c < '0' || c > '9') { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + s := string(item) + switch v.Kind() { + default: + if v.Kind() == reflect.String && v.Type() == numberType { + v.SetString(s) + if !isValidNumber(s) { + d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item)) + } + break + } + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) + } + case reflect.Interface: + n, err := d.convertNumber(s) + if err != nil { + d.saveError(err) + break + } + if v.NumMethod() != 0 { + d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) + break + } + v.Set(reflect.ValueOf(n)) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, 
reflect.Int64: + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || v.OverflowInt(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetInt(n) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n, err := strconv.ParseUint(s, 10, 64) + if err != nil || v.OverflowUint(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetUint(n) + + case reflect.Float32, reflect.Float64: + n, err := strconv.ParseFloat(s, v.Type().Bits()) + if err != nil || v.OverflowFloat(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetFloat(n) + } + } +} + +// The xxxInterface routines build up a value to be stored +// in an empty interface. They are not strictly necessary, +// but they avoid the weight of reflection in this common case. + +// valueInterface is like value but returns interface{} +func (d *decodeState) valueInterface() interface{} { + switch d.scanWhile(scanSkipSpace) { + default: + d.error(errPhase) + panic("unreachable") + case scanBeginArray: + return d.arrayInterface() + case scanBeginObject: + return d.objectInterface() + case scanBeginLiteral: + return d.literalInterface() + } +} + +// arrayInterface is like array but returns []interface{}. +func (d *decodeState) arrayInterface() []interface{} { + var v = make([]interface{}, 0) + for { + // Look ahead for ] - can only happen on first iteration. + op := d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + + // Back up so d.value can have the byte we just read. + d.off-- + d.scan.undo(op) + + v = append(v, d.valueInterface()) + + // Next token must be , or ]. + op = d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + if op != scanArrayValue { + d.error(errPhase) + } + } + return v +} + +// objectInterface is like object but returns map[string]interface{}. +func (d *decodeState) objectInterface() map[string]interface{} { + m := make(map[string]interface{}) + keys := map[string]bool{} + + for { + // Read opening " of string key or closing }. + op := d.scanWhile(scanSkipSpace) + if op == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if op != scanBeginLiteral { + d.error(errPhase) + } + + // Read string key. + start := d.off - 1 + op = d.scanWhile(scanContinue) + item := d.data[start : d.off-1] + key, ok := unquote(item) + if !ok { + d.error(errPhase) + } + + // Check for duplicate keys. + _, ok = keys[key] + if !ok { + keys[key] = true + } else { + d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) + } + + // Read : before value. + if op == scanSkipSpace { + op = d.scanWhile(scanSkipSpace) + } + if op != scanObjectKey { + d.error(errPhase) + } + + // Read value. + m[key] = d.valueInterface() + + // Next token must be , or }. + op = d.scanWhile(scanSkipSpace) + if op == scanEndObject { + break + } + if op != scanObjectValue { + d.error(errPhase) + } + } + return m +} + +// literalInterface is like literal but returns an interface value. +func (d *decodeState) literalInterface() interface{} { + // All bytes inside literal return scanContinue op code. + start := d.off - 1 + op := d.scanWhile(scanContinue) + + // Scan read one byte too far; back up. 
+ d.off-- + d.scan.undo(op) + item := d.data[start:d.off] + + switch c := item[0]; c { + case 'n': // null + return nil + + case 't', 'f': // true, false + return c == 't' + + case '"': // string + s, ok := unquote(item) + if !ok { + d.error(errPhase) + } + return s + + default: // number + if c != '-' && (c < '0' || c > '9') { + d.error(errPhase) + } + n, err := d.convertNumber(string(item)) + if err != nil { + d.saveError(err) + } + return n + } +} + +// getu4 decodes \uXXXX from the beginning of s, returning the hex value, +// or it returns -1. +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + r, err := strconv.ParseUint(string(s[2:6]), 16, 64) + if err != nil { + return -1 + } + return rune(r) +} + +// unquote converts a quoted JSON string literal s into an actual string t. +// The rules are different than for Go, so cannot use strconv.Unquote. +func unquote(s []byte) (t string, ok bool) { + s, ok = unquoteBytes(s) + t = string(s) + return +} + +func unquoteBytes(s []byte) (t []byte, ok bool) { + if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { + return + } + s = s[1 : len(s)-1] + + // Check for unusual characters. If there are none, + // then no unquoting is needed, so return a slice of the + // original bytes. + r := 0 + for r < len(s) { + c := s[r] + if c == '\\' || c == '"' || c < ' ' { + break + } + if c < utf8.RuneSelf { + r++ + continue + } + rr, size := utf8.DecodeRune(s[r:]) + if rr == utf8.RuneError && size == 1 { + break + } + r += size + } + if r == len(s) { + return s, true + } + + b := make([]byte, len(s)+2*utf8.UTFMax) + w := copy(b, s[0:r]) + for r < len(s) { + // Out of room? Can only happen if s is full of + // malformed UTF-8 and we're replacing each + // byte with RuneError. + if w >= len(b)-2*utf8.UTFMax { + nb := make([]byte, (len(b)+utf8.UTFMax)*2) + copy(nb, b[0:w]) + b = nb + } + switch c := s[r]; { + case c == '\\': + r++ + if r >= len(s) { + return + } + switch s[r] { + default: + return + case '"', '\\', '/', '\'': + b[w] = s[r] + r++ + w++ + case 'b': + b[w] = '\b' + r++ + w++ + case 'f': + b[w] = '\f' + r++ + w++ + case 'n': + b[w] = '\n' + r++ + w++ + case 'r': + b[w] = '\r' + r++ + w++ + case 't': + b[w] = '\t' + r++ + w++ + case 'u': + r-- + rr := getu4(s[r:]) + if rr < 0 { + return + } + r += 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(s[r:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + // A valid pair; consume. + r += 6 + w += utf8.EncodeRune(b[w:], dec) + break + } + // Invalid surrogate; fall back to replacement rune. + rr = unicode.ReplacementChar + } + w += utf8.EncodeRune(b[w:], rr) + } + + // Quote, control characters are invalid. + case c == '"', c < ' ': + return + + // ASCII + case c < utf8.RuneSelf: + b[w] = c + r++ + w++ + + // Coerce to well-formed UTF-8. + default: + rr, size := utf8.DecodeRune(s[r:]) + r += size + w += utf8.EncodeRune(b[w:], rr) + } + } + return b[0:w], true +} diff --git a/vendor/github.com/go-jose/go-jose/v3/json/encode.go b/vendor/github.com/go-jose/go-jose/v3/json/encode.go new file mode 100644 index 00000000..ea0a1361 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/json/encode.go @@ -0,0 +1,1197 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package json implements encoding and decoding of JSON objects as defined in +// RFC 4627. 
+// The mapping between JSON objects and Go values is described
+// in the documentation for the Marshal and Unmarshal functions.
+//
+// See "JSON and Go" for an introduction to this package:
+// https://golang.org/doc/articles/json_and_go.html
+package json
+
+import (
+	"bytes"
+	"encoding"
+	"encoding/base64"
+	"fmt"
+	"math"
+	"reflect"
+	"runtime"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"unicode"
+	"unicode/utf8"
+)
+
+// Marshal returns the JSON encoding of v.
+//
+// Marshal traverses the value v recursively.
+// If an encountered value implements the Marshaler interface
+// and is not a nil pointer, Marshal calls its MarshalJSON method
+// to produce JSON. If no MarshalJSON method is present but the
+// value implements encoding.TextMarshaler instead, Marshal calls
+// its MarshalText method.
+// The nil pointer exception is not strictly necessary
+// but mimics a similar, necessary exception in the behavior of
+// UnmarshalJSON.
+//
+// Otherwise, Marshal uses the following type-dependent default encodings:
+//
+// Boolean values encode as JSON booleans.
+//
+// Floating point, integer, and Number values encode as JSON numbers.
+//
+// String values encode as JSON strings coerced to valid UTF-8,
+// replacing invalid bytes with the Unicode replacement rune.
+// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e"
+// to keep some browsers from misinterpreting JSON output as HTML.
+// Ampersand "&" is also escaped to "\u0026" for the same reason.
+//
+// Array and slice values encode as JSON arrays, except that
+// []byte encodes as a base64-encoded string, and a nil slice
+// encodes as the null JSON object.
+//
+// Struct values encode as JSON objects. Each exported struct field
+// becomes a member of the object unless
+//   - the field's tag is "-", or
+//   - the field is empty and its tag specifies the "omitempty" option.
+// The empty values are false, 0, any
+// nil pointer or interface value, and any array, slice, map, or string of
+// length zero. The object's default key string is the struct field name
+// but can be specified in the struct field's tag value. The "json" key in
+// the struct field's tag value is the key name, followed by an optional comma
+// and options. Examples:
+//
+//   // Field is ignored by this package.
+//   Field int `json:"-"`
+//
+//   // Field appears in JSON as key "myName".
+//   Field int `json:"myName"`
+//
+//   // Field appears in JSON as key "myName" and
+//   // the field is omitted from the object if its value is empty,
+//   // as defined above.
+//   Field int `json:"myName,omitempty"`
+//
+//   // Field appears in JSON as key "Field" (the default), but
+//   // the field is skipped if empty.
+//   // Note the leading comma.
+//   Field int `json:",omitempty"`
+//
+// The "string" option signals that a field is stored as JSON inside a
+// JSON-encoded string. It applies only to fields of string, floating point,
+// integer, or boolean types. This extra level of encoding is sometimes used
+// when communicating with JavaScript programs:
+//
+//   Int64String int64 `json:",string"`
+//
+// The key name will be used if it's a non-empty string consisting of
+// only Unicode letters, digits, dollar signs, percent signs, hyphens,
+// underscores and slashes.
+//
+// Anonymous struct fields are usually marshaled as if their inner exported fields
+// were fields in the outer struct, subject to the usual Go visibility rules amended
+// as described in the next paragraph.
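+// (For example, given an illustrative pair of types
+//   type Inner struct{ A int }
+//   type Outer struct{ Inner }
+// Marshal(Outer{Inner{A: 1}}) produces {"A":1} rather than {"Inner":{"A":1}}.)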
+// An anonymous struct field with a name given in its JSON tag is treated as
+// having that name, rather than being anonymous.
+// An anonymous struct field of interface type is treated the same as having
+// that type as its name, rather than being anonymous.
+//
+// The Go visibility rules for struct fields are amended for JSON when
+// deciding which field to marshal or unmarshal. If there are
+// multiple fields at the same level, and that level is the least
+// nested (and would therefore be the nesting level selected by the
+// usual Go rules), the following extra rules apply:
+//
+// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered,
+// even if there are multiple untagged fields that would otherwise conflict.
+// 2) If there is exactly one field (tagged or not according to the first rule), that is selected.
+// 3) Otherwise there are multiple fields, and all are ignored; no error occurs.
+//
+// Handling of anonymous struct fields is new in Go 1.1.
+// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of
+// an anonymous struct field in both current and earlier versions, give the field
+// a JSON tag of "-".
+//
+// Map values encode as JSON objects.
+// The map's key type must be string; the map keys are used as JSON object
+// keys, subject to the UTF-8 coercion described for string values above.
+//
+// Pointer values encode as the value pointed to.
+// A nil pointer encodes as the null JSON object.
+//
+// Interface values encode as the value contained in the interface.
+// A nil interface value encodes as the null JSON object.
+//
+// Channel, complex, and function values cannot be encoded in JSON.
+// Attempting to encode such a value causes Marshal to return
+// an UnsupportedTypeError.
+//
+// JSON cannot represent cyclic data structures and Marshal does not
+// handle them. Passing cyclic structures to Marshal will result in
+// an infinite recursion.
+//
+func Marshal(v interface{}) ([]byte, error) {
+	e := &encodeState{}
+	err := e.marshal(v)
+	if err != nil {
+		return nil, err
+	}
+	return e.Bytes(), nil
+}
+
+// MarshalIndent is like Marshal but applies Indent to format the output.
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+	b, err := Marshal(v)
+	if err != nil {
+		return nil, err
+	}
+	var buf bytes.Buffer
+	err = Indent(&buf, b, prefix, indent)
+	if err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
+// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
+// so that the JSON will be safe to embed inside HTML