diff --git a/examples/deploy/terraform/cluster/main.tf b/examples/deploy/terraform/cluster/main.tf
index d11daa84..cf90583d 100644
--- a/examples/deploy/terraform/cluster/main.tf
+++ b/examples/deploy/terraform/cluster/main.tf
@@ -27,6 +27,7 @@ module "eks" {
   tags              = local.infra.tags
   ignore_tags       = local.infra.ignore_tags
   use_fips_endpoint = var.use_fips_endpoint
+  calico            = { image_registry = try(local.infra.storage.ecr.calico_image_registry, null) }
 }
 
 data "aws_caller_identity" "global" {
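Where the new `calico_image_registry` value comes from is outside this hunk. A minimal sketch of the kind of setting the `try()` above would pick up, assuming the infra configuration's `storage.ecr` object accepts such an attribute (the attribute name follows the lookup; the registry URL is a placeholder):

```hcl
# Hypothetical tfvars for the infra layer; only the attribute read by
# try(local.infra.storage.ecr.calico_image_registry, null) is shown.
storage = {
  ecr = {
    calico_image_registry = "012345678901.dkr.ecr.us-west-2.amazonaws.com"
  }
}
```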
diff --git a/modules/eks/README.md b/modules/eks/README.md
index 3e10a3c3..35589775 100644
--- a/modules/eks/README.md
+++ b/modules/eks/README.md
@@ -69,10 +69,11 @@
 | Name | Description | Type | Default | Required |
 |------|-------------|------|---------|:--------:|
 | [bastion\_info](#input\_bastion\_info) | user = Bastion username.<br>public\_ip = Bastion public ip.<br>security\_group\_id = Bastion sg id.<br>ssh\_bastion\_command = Command to ssh onto bastion. | <pre>object({<br>  user = string<br>  public_ip = string<br>  security_group_id = string<br>  ssh_bastion_command = string<br>})</pre> | n/a | yes |
+| [calico](#input\_calico) | calico = { | <pre>object({<br>  image_registry = optional(string, "quay.io")<br>  version = optional(string, "v3.27.3")<br>})</pre> | `{}` | no |
 | [create\_eks\_role\_arn](#input\_create\_eks\_role\_arn) | Role arn to assume during the EKS cluster creation. | `string` | n/a | yes |
 | [deploy\_id](#input\_deploy\_id) | Domino Deployment ID | `string` | n/a | yes |
 | [efs\_security\_group](#input\_efs\_security\_group) | Security Group ID for EFS | `string` | n/a | yes |
-| [eks](#input\_eks) | service\_ipv4\_cidr = CIDR for EKS cluster kubernetes\_network\_config. | <pre>object({<br>  service_ipv4_cidr = optional(string, "172.20.0.0/16")<br>  creation_role_name = optional(string, null)<br>  k8s_version = optional(string, "1.27")<br>  nodes_master = optional(bool, false)<br>  kubeconfig = optional(object({<br>    extra_args = optional(string, "")<br>    path = optional(string, null)<br>  }), {})<br>  public_access = optional(object({<br>    enabled = optional(bool, false)<br>    cidrs = optional(list(string), [])<br>  }), {})<br>  custom_role_maps = optional(list(object({<br>    rolearn = string<br>    username = string<br>    groups = list(string)<br>  })), [])<br>  master_role_names = optional(list(string), [])<br>  cluster_addons = optional(list(string), ["kube-proxy", "coredns", "vpc-cni"])<br>  ssm_log_group_name = optional(string, "session-manager")<br>  vpc_cni = optional(object({<br>    prefix_delegation = optional(bool, false)<br>    annotate_pod_ip = optional(bool, true)<br>  }))<br>  identity_providers = optional(list(object({<br>    client_id = string<br>    groups_claim = optional(string, null)<br>    groups_prefix = optional(string, null)<br>    identity_provider_config_name = string<br>    issuer_url = optional(string, null)<br>    required_claims = optional(string, null)<br>    username_claim = optional(string, null)<br>    username_prefix = optional(string, null)<br>  })), [])<br>})</pre> | `{}` | no |
+| [eks](#input\_eks) | service\_ipv4\_cidr = CIDR for EKS cluster kubernetes\_network\_config. | <pre>object({<br>  service_ipv4_cidr = optional(string, "172.20.0.0/16")<br>  creation_role_name = optional(string, null)<br>  k8s_version = optional(string, "1.27")<br>  nodes_master = optional(bool, false)<br>  kubeconfig = optional(object({<br>    extra_args = optional(string, "")<br>    path = optional(string, null)<br>  }), {})<br>  public_access = optional(object({<br>    enabled = optional(bool, false)<br>    cidrs = optional(list(string), [])<br>  }), {})<br>  custom_role_maps = optional(list(object({<br>    rolearn = string<br>    username = string<br>    groups = list(string)<br>  })), [])<br>  master_role_names = optional(list(string), [])<br>  cluster_addons = optional(list(string), ["kube-proxy", "coredns", "vpc-cni"])<br>  ssm_log_group_name = optional(string, "session-manager")<br>  vpc_cni = optional(object({<br>    prefix_delegation = optional(bool, false)<br>    annotate_pod_ip = optional(bool, true)<br>  }))<br>  identity_providers = optional(list(object({<br>    client_id = string<br>    groups_claim = optional(string, null)<br>    groups_prefix = optional(string, null)<br>    identity_provider_config_name = string<br>    issuer_url = optional(string, null)<br>    required_claims = optional(string, null)<br>    username_claim = optional(string, null)<br>    username_prefix = optional(string, null)<br>  })), []),<br>})</pre> | `{}` | no |
 | [ignore\_tags](#input\_ignore\_tags) | Tag keys to be ignored by the aws provider. | `list(string)` | `[]` | no |
 | [kms\_info](#input\_kms\_info) | key\_id = KMS key id. | <pre>object({<br>  key_id = string<br>  key_arn = string<br>  enabled = bool<br>})</pre> | n/a | yes |
 | [network\_info](#input\_network\_info) | id = VPC ID. | <pre>object({<br>  vpc_id = string<br>  subnets = object({<br>    public = list(object({<br>      name = string<br>      subnet_id = string<br>      az = string<br>      az_id = string<br>    }))<br>    private = list(object({<br>      name = string<br>      subnet_id = string<br>      az = string<br>      az_id = string<br>    }))<br>    pod = list(object({<br>      name = string<br>      subnet_id = string<br>      az = string<br>      az_id = string<br>    }))<br>  })<br>  vpc_cidrs = optional(string, "10.0.0.0/16")<br>})</pre> | n/a | yes |
diff --git a/modules/eks/main.tf b/modules/eks/main.tf
index 67c4ebdc..76a123fc 100644
--- a/modules/eks/main.tf
+++ b/modules/eks/main.tf
@@ -252,5 +252,6 @@ locals {
       }]
     }
     kubeconfig = local.kubeconfig
+    calico     = var.calico
   }
 }
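The matching variables.tf hunk for the eks module's new input is not part of this diff; going by the type documented in the README row above, the declaration presumably looks roughly like this sketch (the description wording is assumed, the type and `{}` default come from the table):

```hcl
# Sketch of the presumed modules/eks/variables.tf addition, inferred from the
# README type column; not the literal source.
variable "calico" {
  description = "Calico operator image registry and version."
  type = object({
    image_registry = optional(string, "quay.io")
    version        = optional(string, "v3.27.3")
  })
  default = {}
}
```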
diff --git a/modules/eks/submodules/k8s/README.md b/modules/eks/submodules/k8s/README.md
index 1d7bc85a..c31aba67 100644
--- a/modules/eks/submodules/k8s/README.md
+++ b/modules/eks/submodules/k8s/README.md
@@ -33,8 +33,7 @@ No modules.
 | Name | Description | Type | Default | Required |
 |------|-------------|------|---------|:--------:|
 | [bastion\_info](#input\_bastion\_info) | user = Bastion username.<br>public\_ip = Bastion public ip.<br>security\_group\_id = Bastion sg id.<br>ssh\_bastion\_command = Command to ssh onto bastion. | <pre>object({<br>  user = string<br>  public_ip = string<br>  security_group_id = string<br>  ssh_bastion_command = string<br>})</pre> | n/a | yes |
-| [calico\_version](#input\_calico\_version) | Calico operator version. | `string` | `"v3.25.2"` | no |
-| [eks\_info](#input\_eks\_info) | cluster = { | <pre>object({<br>  cluster = object({<br>    version = string<br>    arn = string<br>    security_group_id = string<br>    endpoint = string<br>    roles = list(object({<br>      name = string<br>      arn = string<br>    }))<br>    custom_roles = list(object({<br>      rolearn = string<br>      username = string<br>      groups = list(string)<br>    }))<br>    oidc = object({<br>      arn = string<br>      url = string<br>    })<br>  })<br>  nodes = object({<br>    nodes_master = bool<br>    security_group_id = string<br>    roles = list(object({<br>      name = string<br>      arn = string<br>    }))<br>  })<br>  kubeconfig = object({<br>    path = string<br>    extra_args = string<br>  })<br>})</pre> | n/a | yes |
+| [eks\_info](#input\_eks\_info) | cluster = { | <pre>object({<br>  cluster = object({<br>    version = string<br>    arn = string<br>    security_group_id = string<br>    endpoint = string<br>    roles = list(object({<br>      name = string<br>      arn = string<br>    }))<br>    custom_roles = list(object({<br>      rolearn = string<br>      username = string<br>      groups = list(string)<br>    }))<br>    oidc = object({<br>      arn = string<br>      url = string<br>    })<br>  })<br>  nodes = object({<br>    nodes_master = bool<br>    security_group_id = string<br>    roles = list(object({<br>      name = string<br>      arn = string<br>    }))<br>  })<br>  kubeconfig = object({<br>    path = string<br>    extra_args = string<br>  })<br>  calico = object({<br>    version = string<br>    image_registry = string<br>  })<br>})</pre> | n/a | yes |
 | [ssh\_key](#input\_ssh\_key) | path = SSH private key filepath. | <pre>object({<br>  path = string<br>  key_pair_name = string<br>})</pre> | n/a | yes |
 | [use\_fips\_endpoint](#input\_use\_fips\_endpoint) | Use aws FIPS endpoints | `bool` | `false` | no |
diff --git a/modules/eks/submodules/k8s/main.tf b/modules/eks/submodules/k8s/main.tf
index 4e6c43a9..62fe0cb3 100644
--- a/modules/eks/submodules/k8s/main.tf
+++ b/modules/eks/submodules/k8s/main.tf
@@ -13,41 +13,45 @@ locals {
   resources_directory = path.cwd
   templates_dir       = "${path.module}/templates"
 
+  k8s_functions_sh = {
+    filename = local.k8s_functions_sh_filename
+    content = templatefile("${local.templates_dir}/${local.k8s_functions_sh_template}", {
+      kubeconfig_path       = var.eks_info.kubeconfig.path
+      k8s_tunnel_port       = random_integer.port.result
+      aws_auth_yaml         = basename(local.aws_auth_filename)
+      ssh_pvt_key_path      = var.ssh_key.path
+      eks_cluster_arn       = var.eks_info.cluster.arn
+      bastion_user          = var.bastion_info != null ? var.bastion_info.user : ""
+      bastion_public_ip     = var.bastion_info != null ? var.bastion_info.public_ip : ""
+      calico_version        = var.eks_info.calico.version
+      calico_fips_mode      = var.use_fips_endpoint ? "Enabled" : "Disabled"
+      calico_image_registry = var.eks_info.calico.image_registry
+    })
+  }
+
+  aws_auth = {
+    filename = local.aws_auth_filename
+    content = templatefile("${local.templates_dir}/${local.aws_auth_template}",
+      {
+        nodes_master         = try(var.eks_info.nodes.nodes_master, false)
+        eks_node_role_arns   = toset(var.eks_info.nodes.roles[*].arn)
+        eks_master_role_arns = toset(var.eks_info.cluster.roles[*].arn)
+        eks_custom_role_maps = var.eks_info.cluster.custom_roles
+    })
+  }
+
   templates = {
-    k8s_functions_sh = {
-      filename = local.k8s_functions_sh_filename
-      content = templatefile("${local.templates_dir}/${local.k8s_functions_sh_template}", {
-        kubeconfig_path   = var.eks_info.kubeconfig.path
-        k8s_tunnel_port   = random_integer.port.result
-        aws_auth_yaml     = basename(local.aws_auth_filename)
-        ssh_pvt_key_path  = var.ssh_key.path
-        eks_cluster_arn   = var.eks_info.cluster.arn
-        calico_version    = var.calico_version
-        bastion_user      = var.bastion_info != null ? var.bastion_info.user : ""
-        bastion_public_ip = var.bastion_info != null ? var.bastion_info.public_ip : ""
-        calico_fips_mode  = var.use_fips_endpoint ? "Enabled" : "Disabled"
-      })
-    }
+    k8s_functions_sh = local.k8s_functions_sh
+    aws_auth         = local.aws_auth
 
     k8s_presetup = {
       filename = local.k8s_pre_setup_sh_file
       content = templatefile("${local.templates_dir}/${local.k8s_pre_setup_sh_template}", {
-        k8s_functions_sh_filename = local.k8s_functions_sh_filename
+        k8s_functions_sh_filename = local.k8s_functions_sh.filename
+        hash                      = join("-", [md5(local.k8s_functions_sh.content), md5(local.aws_auth.content)])
         use_fips_endpoint         = tostring(var.use_fips_endpoint)
       })
     }
-
-    aws_auth = {
-      filename = local.aws_auth_filename
-      content = templatefile("${local.templates_dir}/${local.aws_auth_template}",
-        {
-          nodes_master         = try(var.eks_info.nodes.nodes_master, false)
-          eks_node_role_arns   = toset(var.eks_info.nodes.roles[*].arn)
-          eks_master_role_arns = toset(var.eks_info.cluster.roles[*].arn)
-          eks_custom_role_maps = var.eks_info.cluster.custom_roles
-        })
-
-    }
   }
 }
diff --git a/modules/eks/submodules/k8s/templates/k8s-functions.sh.tftpl b/modules/eks/submodules/k8s/templates/k8s-functions.sh.tftpl
index 82172f90..186616ac 100644
--- a/modules/eks/submodules/k8s/templates/k8s-functions.sh.tftpl
+++ b/modules/eks/submodules/k8s/templates/k8s-functions.sh.tftpl
@@ -119,8 +119,9 @@ install_calico() {
     --namespace "tigera-operator" \
     --set installation.kubernetesProvider=EKS \
     --set installation.cni.type=AmazonVPC \
-    --set installation.registry="quay.io/" \
+    --set installation.registry="${calico_image_registry}/" \
     --set installation.fipsMode="${calico_fips_mode}" \
+    --wait \
     --timeout 10m \
     --create-namespace \
     --install
diff --git a/modules/eks/submodules/k8s/templates/k8s-pre-setup.sh.tftpl b/modules/eks/submodules/k8s/templates/k8s-pre-setup.sh.tftpl
index d8d2d67f..70f09f9b 100644
--- a/modules/eks/submodules/k8s/templates/k8s-pre-setup.sh.tftpl
+++ b/modules/eks/submodules/k8s/templates/k8s-pre-setup.sh.tftpl
@@ -1,6 +1,8 @@
 #!/usr/bin/env bash
 set -euo pipefail
 
+# ${hash}
+
 source ${k8s_functions_sh_filename}
 
 export AWS_USE_FIPS_ENDPOINT=${use_fips_endpoint}
diff --git a/modules/eks/submodules/k8s/variables.tf b/modules/eks/submodules/k8s/variables.tf
index 33072aaa..51eaec3d 100644
--- a/modules/eks/submodules/k8s/variables.tf
+++ b/modules/eks/submodules/k8s/variables.tf
@@ -1,9 +1,3 @@
-variable "calico_version" {
-  type        = string
-  description = "Calico operator version."
-  default     = "v3.25.2"
-}
-
 variable "bastion_info" {
   description = <<EOF
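For context on the `hash` template variable introduced above: embedding a digest of the rendered k8s-functions.sh and aws-auth content into the pre-setup script means that any change to those inputs changes the script text itself, so a provisioner keyed on that file re-runs. A rough sketch of that pattern, assuming something like a null_resource wired to the locals shown in main.tf (the resource name and command are hypothetical, not the module's actual resource):

```hcl
# Hypothetical trigger wiring; the module's real resource may differ.
resource "null_resource" "run_k8s_pre_setup" {
  triggers = {
    # Changes whenever the k8s-functions.sh inputs or the aws-auth content
    # change, because the rendered pre-setup script embeds their md5s via "# ${hash}".
    presetup_md5 = md5(local.templates.k8s_presetup.content)
  }

  provisioner "local-exec" {
    command = "bash ${local.templates.k8s_presetup.filename}"
  }
}
```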
 | [kms\_info](#input\_kms\_info) | key\_id = KMS key id. | <pre>object({<br>  key_id = string<br>  key_arn = string<br>  enabled = bool<br>})</pre> | n/a | yes |
 | [network\_info](#input\_network\_info) | id = VPC ID. | <pre>object({<br>  vpc_id = string<br>  subnets = object({<br>    public = optional(list(object({<br>      name = string<br>      subnet_id = string<br>      az = string<br>      az_id = string<br>    })), [])<br>    private = list(object({<br>      name = string<br>      subnet_id = string<br>      az = string<br>      az_id = string<br>    }))<br>    pod = optional(list(object({<br>      name = string<br>      subnet_id = string<br>      az = string<br>      az_id = string<br>    })), [])<br>  })<br>})</pre> | n/a | yes |
+| [region](#input\_region) | AWS region for the deployment | `string` | n/a | yes |
 | [storage](#input\_storage) | storage = { | <pre>object({<br>  efs = optional(object({<br>    access_point_path = optional(string)<br>    backup_vault = optional(object({<br>      create = optional(bool)<br>      force_destroy = optional(bool)<br>      backup = optional(object({<br>        schedule = optional(string)<br>        cold_storage_after = optional(number)<br>        delete_after = optional(number)<br>      }))<br>    }))<br>  }))<br>  s3 = optional(object({<br>    force_destroy_on_deletion = optional(bool)<br>  }))<br>  ecr = optional(object({<br>    force_destroy_on_deletion = optional(bool)<br>  }))<br>  enable_remote_backup = optional(bool)<br>  costs_enabled = optional(bool)<br>})</pre> | n/a | yes |
 | [use\_fips\_endpoint](#input\_use\_fips\_endpoint) | Use aws FIPS endpoints | `bool` | `false` | no |
@@ -76,5 +80,5 @@ No modules.
 | Name | Description |
 |------|-------------|
-| [info](#output\_info) | efs = {
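The rows above document a storage-side change (a new `region` input and a modified `info` output), which is presumably where the `local.infra.storage.ecr.calico_image_registry` value read in examples/deploy/terraform/cluster/main.tf originates. A hedged sketch of what such an output could expose, assuming the `region` input is used to build an account-local ECR registry URL (the output shape beyond the ecr piece, the data source, and the URL construction are assumptions):

```hcl
# Hypothetical storage-module output; only the ECR piece relevant to Calico is shown.
data "aws_caller_identity" "this" {}

output "info" {
  description = "Storage details consumed by the cluster layer."
  value = {
    ecr = {
      # Assumed: the account-local ECR registry in the deployment region.
      calico_image_registry = "${data.aws_caller_identity.this.account_id}.dkr.ecr.${var.region}.amazonaws.com"
    }
  }
}
```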