diff --git a/examples/deploy/terraform/cluster/main.tf b/examples/deploy/terraform/cluster/main.tf index d11daa84..cf90583d 100644 --- a/examples/deploy/terraform/cluster/main.tf +++ b/examples/deploy/terraform/cluster/main.tf @@ -27,6 +27,7 @@ module "eks" { tags = local.infra.tags ignore_tags = local.infra.ignore_tags use_fips_endpoint = var.use_fips_endpoint + calico = { image_registry = try(local.infra.storage.ecr.calico_image_registry, null) } } data "aws_caller_identity" "global" { diff --git a/modules/eks/README.md b/modules/eks/README.md index 3e10a3c3..35589775 100644 --- a/modules/eks/README.md +++ b/modules/eks/README.md @@ -69,10 +69,11 @@ | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| | [bastion\_info](#input\_bastion\_info) | user = Bastion username.
public\_ip = Bastion public ip.
security\_group\_id = Bastion sg id.
ssh\_bastion\_command = Command to ssh onto bastion. |
object({
user = string
public_ip = string
security_group_id = string
ssh_bastion_command = string
})
| n/a | yes | +| [calico](#input\_calico) | calico = {
version = Configure the version for Calico
image\_registry = Configure the image registry for Calico
} |
object({
image_registry = optional(string, "quay.io")
version = optional(string, "v3.27.3")
})
| `{}` | no | | [create\_eks\_role\_arn](#input\_create\_eks\_role\_arn) | Role arn to assume during the EKS cluster creation. | `string` | n/a | yes | | [deploy\_id](#input\_deploy\_id) | Domino Deployment ID | `string` | n/a | yes | | [efs\_security\_group](#input\_efs\_security\_group) | Security Group ID for EFS | `string` | n/a | yes | -| [eks](#input\_eks) | service\_ipv4\_cidr = CIDR for EKS cluster kubernetes\_network\_config.
creation\_role\_name = Name of the role to import.
k8s\_version = EKS cluster k8s version.
nodes\_master = Grants the nodes role system:master access. NOT recommended.<br/>
kubeconfig = {
extra\_args = Optional extra args when generating kubeconfig.
path = Fully qualified path name to write the kubeconfig file.
}
public\_access = {
enabled = Enable EKS API public endpoint.
cidrs = List of CIDR ranges permitted for accessing the EKS public endpoint.
}
Custom role maps for aws auth configmap
custom\_role\_maps = {
rolearn = string
username = string
groups = list(string)
}
master\_role\_names = IAM role names to be added as masters in eks.
cluster\_addons = EKS cluster addons. vpc-cni is installed separately.
vpc\_cni = Configuration for AWS VPC CNI
ssm\_log\_group\_name = CloudWatch log group to send the SSM session logs to.
identity\_providers = Configuration for IDP (Identity Provider).<br/>
} |
object({
service_ipv4_cidr = optional(string, "172.20.0.0/16")
creation_role_name = optional(string, null)
k8s_version = optional(string, "1.27")
nodes_master = optional(bool, false)
kubeconfig = optional(object({
extra_args = optional(string, "")
path = optional(string, null)
}), {})
public_access = optional(object({
enabled = optional(bool, false)
cidrs = optional(list(string), [])
}), {})
custom_role_maps = optional(list(object({
rolearn = string
username = string
groups = list(string)
})), [])
master_role_names = optional(list(string), [])
cluster_addons = optional(list(string), ["kube-proxy", "coredns", "vpc-cni"])
ssm_log_group_name = optional(string, "session-manager")
vpc_cni = optional(object({
prefix_delegation = optional(bool, false)
annotate_pod_ip = optional(bool, true)
}))
identity_providers = optional(list(object({
client_id = string
groups_claim = optional(string, null)
groups_prefix = optional(string, null)
identity_provider_config_name = string
issuer_url = optional(string, null)
required_claims = optional(string, null)
username_claim = optional(string, null)
username_prefix = optional(string, null)
})), [])
})
| `{}` | no | +| [eks](#input\_eks) | service\_ipv4\_cidr = CIDR for EKS cluster kubernetes\_network\_config.
creation\_role\_name = Name of the role to import.
k8s\_version = EKS cluster k8s version.
nodes\_master = Grants the nodes role system:master access. NOT recommended.<br/>
kubeconfig = {
extra\_args = Optional extra args when generating kubeconfig.
path = Fully qualified path name to write the kubeconfig file.
}
public\_access = {
enabled = Enable EKS API public endpoint.
cidrs = List of CIDR ranges permitted for accessing the EKS public endpoint.
}
Custom role maps for aws auth configmap
custom\_role\_maps = {
rolearn = string
username = string
groups = list(string)
}
master\_role\_names = IAM role names to be added as masters in eks.
cluster\_addons = EKS cluster addons. vpc-cni is installed separately.
vpc\_cni = Configuration for AWS VPC CNI
ssm\_log\_group\_name = CloudWatch log group to send the SSM session logs to.
identity\_providers = Configuration for IDP (Identity Provider).<br/>
} |
object({
service_ipv4_cidr = optional(string, "172.20.0.0/16")
creation_role_name = optional(string, null)
k8s_version = optional(string, "1.27")
nodes_master = optional(bool, false)
kubeconfig = optional(object({
extra_args = optional(string, "")
path = optional(string, null)
}), {})
public_access = optional(object({
enabled = optional(bool, false)
cidrs = optional(list(string), [])
}), {})
custom_role_maps = optional(list(object({
rolearn = string
username = string
groups = list(string)
})), [])
master_role_names = optional(list(string), [])
cluster_addons = optional(list(string), ["kube-proxy", "coredns", "vpc-cni"])
ssm_log_group_name = optional(string, "session-manager")
vpc_cni = optional(object({
prefix_delegation = optional(bool, false)
annotate_pod_ip = optional(bool, true)
}))
identity_providers = optional(list(object({
client_id = string
groups_claim = optional(string, null)
groups_prefix = optional(string, null)
identity_provider_config_name = string
issuer_url = optional(string, null)
required_claims = optional(string, null)
username_claim = optional(string, null)
username_prefix = optional(string, null)
})), []),
})
| `{}` | no | | [ignore\_tags](#input\_ignore\_tags) | Tag keys to be ignored by the aws provider. | `list(string)` | `[]` | no | | [kms\_info](#input\_kms\_info) | key\_id = KMS key id.
key\_arn = KMS key arn.
enabled = KMS key is enabled |
object({
key_id = string
key_arn = string
enabled = bool
})
| n/a | yes | | [network\_info](#input\_network\_info) | id = VPC ID.
subnets = {
public = List of public Subnets.
[{
name = Subnet name.
subnet\_id = Subnet id<br/>
az = Subnet availability\_zone
az\_id = Subnet availability\_zone\_id
}]
private = List of private Subnets.
[{
name = Subnet name.
subnet\_id = Subnet id<br/>
az = Subnet availability\_zone
az\_id = Subnet availability\_zone\_id
}]
pod = List of pod Subnets.
[{
name = Subnet name.
subnet\_id = Subnet id<br/>
az = Subnet availability\_zone
az\_id = Subnet availability\_zone\_id
}]
} |
object({
vpc_id = string
subnets = object({
public = list(object({
name = string
subnet_id = string
az = string
az_id = string
}))
private = list(object({
name = string
subnet_id = string
az = string
az_id = string
}))
pod = list(object({
name = string
subnet_id = string
az = string
az_id = string
}))
})
vpc_cidrs = optional(string, "10.0.0.0/16")
})
| n/a | yes | diff --git a/modules/eks/main.tf b/modules/eks/main.tf index 67c4ebdc..76a123fc 100644 --- a/modules/eks/main.tf +++ b/modules/eks/main.tf @@ -252,5 +252,6 @@ locals { }] } kubeconfig = local.kubeconfig + calico = var.calico } } diff --git a/modules/eks/submodules/k8s/README.md b/modules/eks/submodules/k8s/README.md index 1d7bc85a..c31aba67 100644 --- a/modules/eks/submodules/k8s/README.md +++ b/modules/eks/submodules/k8s/README.md @@ -33,8 +33,7 @@ No modules. | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| | [bastion\_info](#input\_bastion\_info) | user = Bastion username.
public\_ip = Bastion public ip.
security\_group\_id = Bastion sg id.
ssh\_bastion\_command = Command to ssh onto bastion. |
object({
user = string
public_ip = string
security_group_id = string
ssh_bastion_command = string
})
| n/a | yes | -| [calico\_version](#input\_calico\_version) | Calico operator version. | `string` | `"v3.25.2"` | no | -| [eks\_info](#input\_eks\_info) | cluster = {
version = K8s version.
arn = EKS Cluster arn.
security\_group\_id = EKS Cluster security group id.
endpoint = EKS Cluster API endpoint.
roles = Default IAM Roles associated with the EKS cluster. {
name = string
arn = string
}
custom\_roles = Custom IAM Roles associated with the EKS cluster. {
rolearn = string
username = string
groups = list(string)
}
oidc = {
arn = OIDC provider ARN.
url = OIDC provider url.
}
}
nodes = {
security\_group\_id = EKS Nodes security group id.
roles = IAM Roles associated with the EKS Nodes.{
name = string
arn = string
}
}
kubeconfig = Kubeconfig details.{
path = string
extra\_args = string
} |
object({
cluster = object({
version = string
arn = string
security_group_id = string
endpoint = string
roles = list(object({
name = string
arn = string
}))
custom_roles = list(object({
rolearn = string
username = string
groups = list(string)
}))
oidc = object({
arn = string
url = string
})
})
nodes = object({
nodes_master = bool
security_group_id = string
roles = list(object({
name = string
arn = string
}))
})
kubeconfig = object({
path = string
extra_args = string
})
})
| n/a | yes | +| [eks\_info](#input\_eks\_info) | cluster = {
version = K8s version.
arn = EKS Cluster arn.
security\_group\_id = EKS Cluster security group id.
endpoint = EKS Cluster API endpoint.
roles = Default IAM Roles associated with the EKS cluster. {
name = string
arn = string
}
custom\_roles = Custom IAM Roles associated with the EKS cluster. {
rolearn = string
username = string
groups = list(string)
}
oidc = {
arn = OIDC provider ARN.
url = OIDC provider url.
}
}
nodes = {
security\_group\_id = EKS Nodes security group id.
roles = IAM Roles associated with the EKS Nodes.{
name = string
arn = string
}
}
kubeconfig = Kubeconfig details.{
path = string
extra\_args = string
}
calico = {
version = Configure the version for Calico<br/>
image\_registry = Configure the image registry for Calico
} |
object({
cluster = object({
version = string
arn = string
security_group_id = string
endpoint = string
roles = list(object({
name = string
arn = string
}))
custom_roles = list(object({
rolearn = string
username = string
groups = list(string)
}))
oidc = object({
arn = string
url = string
})
})
nodes = object({
nodes_master = bool
security_group_id = string
roles = list(object({
name = string
arn = string
}))
})
kubeconfig = object({
path = string
extra_args = string
})
calico = object({
version = string
image_registry = string
})
})
| n/a | yes | | [ssh\_key](#input\_ssh\_key) | path = SSH private key filepath.
key\_pair\_name = AWS key\_pair name. |
object({
path = string
key_pair_name = string
})
| n/a | yes | | [use\_fips\_endpoint](#input\_use\_fips\_endpoint) | Use aws FIPS endpoints | `bool` | `false` | no | diff --git a/modules/eks/submodules/k8s/main.tf b/modules/eks/submodules/k8s/main.tf index 4e6c43a9..62fe0cb3 100644 --- a/modules/eks/submodules/k8s/main.tf +++ b/modules/eks/submodules/k8s/main.tf @@ -13,41 +13,45 @@ locals { resources_directory = path.cwd templates_dir = "${path.module}/templates" + k8s_functions_sh = { + filename = local.k8s_functions_sh_filename + content = templatefile("${local.templates_dir}/${local.k8s_functions_sh_template}", { + kubeconfig_path = var.eks_info.kubeconfig.path + k8s_tunnel_port = random_integer.port.result + aws_auth_yaml = basename(local.aws_auth_filename) + ssh_pvt_key_path = var.ssh_key.path + eks_cluster_arn = var.eks_info.cluster.arn + bastion_user = var.bastion_info != null ? var.bastion_info.user : "" + bastion_public_ip = var.bastion_info != null ? var.bastion_info.public_ip : "" + calico_version = var.eks_info.calico.version + calico_fips_mode = var.use_fips_endpoint ? "Enabled" : "Disabled" + calico_image_registry = var.eks_info.calico.image_registry + }) + } + + aws_auth = { + filename = local.aws_auth_filename + content = templatefile("${local.templates_dir}/${local.aws_auth_template}", + { + nodes_master = try(var.eks_info.nodes.nodes_master, false) + eks_node_role_arns = toset(var.eks_info.nodes.roles[*].arn) + eks_master_role_arns = toset(var.eks_info.cluster.roles[*].arn) + eks_custom_role_maps = var.eks_info.cluster.custom_roles + }) + } + templates = { - k8s_functions_sh = { - filename = local.k8s_functions_sh_filename - content = templatefile("${local.templates_dir}/${local.k8s_functions_sh_template}", { - kubeconfig_path = var.eks_info.kubeconfig.path - k8s_tunnel_port = random_integer.port.result - aws_auth_yaml = basename(local.aws_auth_filename) - ssh_pvt_key_path = var.ssh_key.path - eks_cluster_arn = var.eks_info.cluster.arn - calico_version = var.calico_version - bastion_user = var.bastion_info != null ? var.bastion_info.user : "" - bastion_public_ip = var.bastion_info != null ? var.bastion_info.public_ip : "" - calico_fips_mode = var.use_fips_endpoint ? 
"Enabled" : "Disabled" - }) - } + k8s_functions_sh = local.k8s_functions_sh + aws_auth = local.aws_auth k8s_presetup = { filename = local.k8s_pre_setup_sh_file content = templatefile("${local.templates_dir}/${local.k8s_pre_setup_sh_template}", { - k8s_functions_sh_filename = local.k8s_functions_sh_filename + k8s_functions_sh_filename = local.k8s_functions_sh.filename + hash = join("-", [md5(local.k8s_functions_sh.content), md5(local.aws_auth.content)]) use_fips_endpoint = tostring(var.use_fips_endpoint) }) } - - aws_auth = { - filename = local.aws_auth_filename - content = templatefile("${local.templates_dir}/${local.aws_auth_template}", - { - nodes_master = try(var.eks_info.nodes.nodes_master, false) - eks_node_role_arns = toset(var.eks_info.nodes.roles[*].arn) - eks_master_role_arns = toset(var.eks_info.cluster.roles[*].arn) - eks_custom_role_maps = var.eks_info.cluster.custom_roles - }) - - } } } diff --git a/modules/eks/submodules/k8s/templates/k8s-functions.sh.tftpl b/modules/eks/submodules/k8s/templates/k8s-functions.sh.tftpl index 82172f90..186616ac 100644 --- a/modules/eks/submodules/k8s/templates/k8s-functions.sh.tftpl +++ b/modules/eks/submodules/k8s/templates/k8s-functions.sh.tftpl @@ -119,8 +119,9 @@ install_calico() { --namespace "tigera-operator" \ --set installation.kubernetesProvider=EKS \ --set installation.cni.type=AmazonVPC \ - --set installation.registry="quay.io/" \ + --set installation.registry="${calico_image_registry}/" \ --set installation.fipsMode="${calico_fips_mode}" \ + --wait \ --timeout 10m \ --create-namespace \ --install diff --git a/modules/eks/submodules/k8s/templates/k8s-pre-setup.sh.tftpl b/modules/eks/submodules/k8s/templates/k8s-pre-setup.sh.tftpl index d8d2d67f..70f09f9b 100644 --- a/modules/eks/submodules/k8s/templates/k8s-pre-setup.sh.tftpl +++ b/modules/eks/submodules/k8s/templates/k8s-pre-setup.sh.tftpl @@ -1,6 +1,8 @@ #!/usr/bin/env bash set -euo pipefail +# ${hash} + source ${k8s_functions_sh_filename} export AWS_USE_FIPS_ENDPOINT=${use_fips_endpoint} diff --git a/modules/eks/submodules/k8s/variables.tf b/modules/eks/submodules/k8s/variables.tf index 33072aaa..51eaec3d 100644 --- a/modules/eks/submodules/k8s/variables.tf +++ b/modules/eks/submodules/k8s/variables.tf @@ -1,9 +1,3 @@ -variable "calico_version" { - type = string - description = "Calico operator version." - default = "v3.25.2" -} - variable "bastion_info" { description = < [deploy\_id](#input\_deploy\_id) | Domino Deployment ID | `string` | n/a | yes | | [kms\_info](#input\_kms\_info) | key\_id = KMS key id.
key\_arn = KMS key arn.
enabled = KMS key is enabled |
object({
key_id = string
key_arn = string
enabled = bool
})
| n/a | yes | | [network\_info](#input\_network\_info) | id = VPC ID.
subnets = {
public = List of public Subnets.
[{
name = Subnet name.
subnet\_id = Subnet id<br/>
az = Subnet availability\_zone
az\_id = Subnet availability\_zone\_id
}]
private = List of private Subnets.
[{
name = Subnet name.
subnet\_id = Subnet id<br/>
az = Subnet availability\_zone
az\_id = Subnet availability\_zone\_id
}]
pod = List of pod Subnets.
[{
name = Subnet name.
subnet\_id = Subnet id<br/>
az = Subnet availability\_zone
az\_id = Subnet availability\_zone\_id
}]
} |
object({
vpc_id = string
subnets = object({
public = optional(list(object({
name = string
subnet_id = string
az = string
az_id = string
})), [])
private = list(object({
name = string
subnet_id = string
az = string
az_id = string
}))
pod = optional(list(object({
name = string
subnet_id = string
az = string
az_id = string
})), [])
})
})
| n/a | yes | +| [region](#input\_region) | AWS region for the deployment | `string` | n/a | yes | | [storage](#input\_storage) | storage = {
efs = {
access\_point\_path = Filesystem path for efs.
backup\_vault = {
create = Create backup vault for EFS toggle.
force\_destroy = Toggle to allow automatic destruction of all backups when destroying.
backup = {
schedule = Cron-style schedule for EFS backup vault (default: once a day at 12pm).
cold\_storage\_after = Move backup data to cold storage after this many days.
delete\_after = Delete backup data after this many days.
}
}
}
s3 = {
force\_destroy\_on\_deletion = Toggle to allow recursive deletion of all objects in the s3 buckets. If 'false', terraform will NOT be able to delete non-empty buckets.<br/>
}
ecr = {
force\_destroy\_on\_deletion = Toggle to allow recursive deletion of all objects in the ECR repositories. If 'false', terraform will NOT be able to delete non-empty repositories.<br/>
}
enable\_remote\_backup = Enable tagging required for cross-account backups
costs\_enabled = Determines whether to provision Domino cost-related infrastructure, i.e., long-term storage<br/>
}
} |
object({
efs = optional(object({
access_point_path = optional(string)
backup_vault = optional(object({
create = optional(bool)
force_destroy = optional(bool)
backup = optional(object({
schedule = optional(string)
cold_storage_after = optional(number)
delete_after = optional(number)
}))
}))
}))
s3 = optional(object({
force_destroy_on_deletion = optional(bool)
}))
ecr = optional(object({
force_destroy_on_deletion = optional(bool)
}))
enable_remote_backup = optional(bool)
costs_enabled = optional(bool)
})
| n/a | yes | | [use\_fips\_endpoint](#input\_use\_fips\_endpoint) | Use aws FIPS endpoints | `bool` | `false` | no | @@ -76,5 +80,5 @@ No modules. | Name | Description | |------|-------------| -| [info](#output\_info) | efs = {
access\_point = EFS access point.
file\_system = EFS file\_system.
security\_group\_id = EFS security group id.
}
s3 = {
buckets = "S3 buckets name and arn"
iam\_policy\_arn = S3 IAM Policy ARN.
}
ecr = {
container\_registry = ECR base registry URL. Grab the base AWS account ECR URL and add the deploy\_id. Domino will append /environment and /model.
iam\_policy\_arn = ECR IAM Policy ARN.
} | +| [info](#output\_info) | efs = {
access\_point = EFS access point.
file\_system = EFS file\_system.
security\_group\_id = EFS security group id.
}
s3 = {
buckets = "S3 buckets name and arn"
iam\_policy\_arn = S3 IAM Policy ARN.
}
ecr = {
container\_registry = ECR base registry URL. Grab the base AWS account ECR URL and add the deploy\_id. Domino will append /environment and /model.
iam\_policy\_arn = ECR IAM Policy ARN.
calico\_image\_registry = Image registry for Calico. Will be a pull through cache for Quay.io unless in GovCloud, China, or have FIPS enabled.
} | diff --git a/modules/infra/submodules/storage/ecr.tf b/modules/infra/submodules/storage/ecr.tf index d78697dc..7a0bf9bb 100644 --- a/modules/infra/submodules/storage/ecr.tf +++ b/modules/infra/submodules/storage/ecr.tf @@ -1,6 +1,10 @@ locals { encryption_type = var.kms_info.enabled ? "KMS" : "AES256" ecr_repos = toset(["model", "environment"]) + + # FIPS, GovCloud and China don't support pull through cache fully yet + # https://docs.aws.amazon.com/AmazonECR/latest/userguide/pull-through-cache.html#pull-through-cache-considerations + supports_pull_through_cache = data.aws_partition.current.partition == "aws" && !var.use_fips_endpoint } resource "aws_ecr_repository" "this" { @@ -20,5 +24,10 @@ resource "aws_ecr_repository" "this" { encryption_configuration, ] } +} +resource "aws_ecr_pull_through_cache_rule" "quay" { + count = local.supports_pull_through_cache ? 1 : 0 + ecr_repository_prefix = "${var.deploy_id}/quay" + upstream_registry_url = "quay.io" } diff --git a/modules/infra/submodules/storage/iam.tf b/modules/infra/submodules/storage/iam.tf index c91d1872..8a5191a8 100644 --- a/modules/infra/submodules/storage/iam.tf +++ b/modules/infra/submodules/storage/iam.tf @@ -56,6 +56,27 @@ data "aws_iam_policy_document" "ecr" { "ecr:PutImage" ] } + + override_policy_documents = local.supports_pull_through_cache ? [data.aws_iam_policy_document.ecr_pull_through_cache[0].json] : [] +} + +data "aws_iam_policy_document" "ecr_pull_through_cache" { + count = local.supports_pull_through_cache ? 1 : 0 + + statement { + effect = "Allow" + + resources = [ + "arn:${data.aws_partition.current.partition}:ecr:${var.region}:${data.aws_caller_identity.this.account_id}:repository/${aws_ecr_pull_through_cache_rule.quay[0].ecr_repository_prefix}/*" + ] + + actions = [ + "ecr:BatchGetImage", + "ecr:BatchImportUpstreamImage", + "ecr:CreateRepository", + "ecr:GetDownloadUrlForLayer" + ] + } } resource "aws_iam_policy" "ecr" { diff --git a/modules/infra/submodules/storage/main.tf b/modules/infra/submodules/storage/main.tf index f1bd50fd..8fd1f50d 100644 --- a/modules/infra/submodules/storage/main.tf +++ b/modules/infra/submodules/storage/main.tf @@ -1,5 +1,6 @@ data "aws_elb_service_account" "this" {} data "aws_partition" "current" {} +data "aws_caller_identity" "this" {} locals { private_subnet_ids = var.network_info.subnets.private[*].subnet_id diff --git a/modules/infra/submodules/storage/outputs.tf b/modules/infra/submodules/storage/outputs.tf index 657f2705..9324b15b 100644 --- a/modules/infra/submodules/storage/outputs.tf +++ b/modules/infra/submodules/storage/outputs.tf @@ -12,6 +12,7 @@ output "info" { ecr = { container_registry = ECR base registry URL. Grab the base AWS account ECR URL and add the deploy_id. Domino will append /environment and /model. iam_policy_arn = ECR IAM Policy ARN. + calico_image_registry = Image registry for Calico. Will be a pull through cache for Quay.io unless in GovCloud, China, or have FIPS enabled. } EOF value = { @@ -32,8 +33,9 @@ output "info" { iam_policy_arn = aws_iam_policy.s3.arn } ecr = { - container_registry = join("/", concat(slice(split("/", aws_ecr_repository.this["environment"].repository_url), 0, 1), [var.deploy_id])) - iam_policy_arn = aws_iam_policy.ecr.arn + container_registry = join("/", concat(slice(split("/", aws_ecr_repository.this["environment"].repository_url), 0, 1), [var.deploy_id])) + iam_policy_arn = aws_iam_policy.ecr.arn + calico_image_registry = local.supports_pull_through_cache ? 
"${data.aws_caller_identity.this.id}.dkr.ecr.${var.region}.amazonaws.com/${var.deploy_id}/quay" : "quay.io" } } } diff --git a/modules/infra/submodules/storage/variables.tf b/modules/infra/submodules/storage/variables.tf index f02bb624..79dd9c59 100644 --- a/modules/infra/submodules/storage/variables.tf +++ b/modules/infra/submodules/storage/variables.tf @@ -1,3 +1,13 @@ +variable "region" { + type = string + description = "AWS region for the deployment" + nullable = false + validation { + condition = can(regex("(us(-gov)?|ap|ca|cn|eu|sa|me|af|il)-(central|(north|south)?(east|west)?)-[0-9]", var.region)) + error_message = "The provided region must follow the format of AWS region names, e.g., us-west-2, us-gov-west-1." + } +} + variable "deploy_id" { type = string description = "Domino Deployment ID"