diff --git a/.circleci/config.yml b/.circleci/config.yml index 208ec288..eb476826 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -6,7 +6,7 @@ parameters: default: "v3.11.2" terraform_version: type: string - default: "1.7.3" + default: "1.9.3" hcledit_version: type: string default: "0.2.9" diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 13d110a4..36cf374b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,7 +1,7 @@ ## NOTE: Changes(rename/add/delete) to pre-commit ids need to be replicated in .github/workflows/terraform-checks.yml(GHA). repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.3.0 + rev: v4.6.0 hooks: - id: check-merge-conflict - id: end-of-file-fixer @@ -13,67 +13,67 @@ repos: - id: circleci-validate args: [--org-slug, github/cerebrotech] - repo: https://github.com/python-jsonschema/check-jsonschema - rev: 0.20.0 + rev: 0.29.1 hooks: - id: check-github-workflows - id: check-dependabot - id: check-github-actions - repo: https://github.com/antonbabenko/pre-commit-terraform - rev: v1.88.4 + rev: v1.92.1 hooks: - id: terraform_validate # See #4 on https://github.com/antonbabenko/pre-commit-terraform#terraform_validate exclude: (modules/eks/[^/]+$|modules/infra/submodules/cost-usage-report) args: - - '--hook-config=--retry-once-with-cleanup=true' + - "--hook-config=--retry-once-with-cleanup=true" - id: terraform_providers_lock args: - --tf-init-args=-upgrade - id: terraform_docs args: - - '--args=--lockfile=false' - - '--hook-config=--path-to-file=README.md' - - '--hook-config=--add-to-existing-file=true' - - '--hook-config=--create-file-if-not-exist=true' - - '--hook-config=--recursive.enabled=true' - - '--hook-config=--recursive.path=submodules' + - "--args=--lockfile=false" + - "--hook-config=--path-to-file=README.md" + - "--hook-config=--add-to-existing-file=true" + - "--hook-config=--create-file-if-not-exist=true" + - "--hook-config=--recursive.enabled=true" + - "--hook-config=--recursive.path=submodules" - id: terraform_fmt - id: terraform_tflint args: - - '--args=--config=__GIT_WORKING_DIR__/.tflint.hcl' - - '--args=--only=terraform_deprecated_interpolation' - - '--args=--only=terraform_deprecated_index' - - '--args=--only=terraform_unused_declarations' - - '--args=--only=terraform_comment_syntax' - - '--args=--only=terraform_documented_outputs' - - '--args=--only=terraform_documented_variables' - - '--args=--only=terraform_typed_variables' - - '--args=--only=terraform_module_pinned_source' - - '--args=--only=terraform_naming_convention' - - '--args=--only=terraform_required_version' - - '--args=--only=terraform_required_providers' - - '--args=--only=terraform_standard_module_structure' - - '--args=--only=terraform_workspace_remote' - - '--args=--enable-rule=aws_iam_policy_document_gov_friendly_arns' - - '--args=--enable-rule=aws_iam_policy_gov_friendly_arns' - - '--args=--enable-rule=aws_iam_role_policy_gov_friendly_arns' + - "--args=--config=__GIT_WORKING_DIR__/.tflint.hcl" + - "--args=--only=terraform_deprecated_interpolation" + - "--args=--only=terraform_deprecated_index" + - "--args=--only=terraform_unused_declarations" + - "--args=--only=terraform_comment_syntax" + - "--args=--only=terraform_documented_outputs" + - "--args=--only=terraform_documented_variables" + - "--args=--only=terraform_typed_variables" + - "--args=--only=terraform_module_pinned_source" + - "--args=--only=terraform_naming_convention" + - "--args=--only=terraform_required_version" + - 
"--args=--only=terraform_required_providers" + - "--args=--only=terraform_standard_module_structure" + - "--args=--only=terraform_workspace_remote" + - "--args=--enable-rule=aws_iam_policy_document_gov_friendly_arns" + - "--args=--enable-rule=aws_iam_policy_gov_friendly_arns" + - "--args=--enable-rule=aws_iam_role_policy_gov_friendly_arns" - id: terraform_checkov args: - - '--args=--compact' - - '--args=--quiet' - - '--args=--skip-check CKV_CIRCLECIPIPELINES_2,CKV_CIRCLECIPIPELINES_6,CKV2_AWS_11,CKV2_AWS_12,CKV2_AWS_6,CKV_AWS_109,CKV_AWS_111,CKV_AWS_135,CKV_AWS_144,CKV_AWS_145,CKV_AWS_158,CKV_AWS_18,CKV_AWS_184,CKV_AWS_19,CKV_AWS_21,CKV_AWS_66,CKV_AWS_88,CKV2_GHA_1,CKV_AWS_163,CKV_AWS_39,CKV_AWS_38,CKV2_AWS_61,CKV2_AWS_62,CKV_AWS_136,CKV_AWS_329,CKV_AWS_338,CKV_AWS_339,CKV_AWS_341,CKV_AWS_356,CKV2_AWS_19,CKV2_AWS_5,CKV_AWS_150,CKV_AWS_123,CKV2_AWS_65,CKV2_AWS_67' + - "--args=--compact" + - "--args=--quiet" + - "--args=--skip-check CKV_CIRCLECIPIPELINES_2,CKV_CIRCLECIPIPELINES_6,CKV2_AWS_11,CKV2_AWS_12,CKV2_AWS_6,CKV_AWS_109,CKV_AWS_111,CKV_AWS_135,CKV_AWS_144,CKV_AWS_145,CKV_AWS_158,CKV_AWS_18,CKV_AWS_184,CKV_AWS_19,CKV_AWS_21,CKV_AWS_66,CKV_AWS_88,CKV2_GHA_1,CKV_AWS_163,CKV_AWS_39,CKV_AWS_38,CKV2_AWS_61,CKV2_AWS_62,CKV_AWS_136,CKV_AWS_329,CKV_AWS_338,CKV_AWS_339,CKV_AWS_341,CKV_AWS_356,CKV2_AWS_19,CKV2_AWS_5,CKV_AWS_150,CKV_AWS_123,CKV2_AWS_65,CKV2_AWS_67,CKV2_AWS_57,CKV_AWS_149" - id: terraform_trivy args: - - '--args=--severity=HIGH,CRITICAL' - - '--args=--ignorefile=__GIT_WORKING_DIR__/.trivyignore' - - '--args=--exit-code=1' + - "--args=--severity=HIGH,CRITICAL" + - "--args=--ignorefile=__GIT_WORKING_DIR__/.trivyignore" + - "--args=--exit-code=1" - repo: local hooks: - id: check_aws_partition name: Check for hard coded AWS partition entry: ./bin/pre-commit/check-aws-partition.sh language: script - exclude: '^(bin|examples)' + exclude: "^(bin|examples)" - id: validate_iam_bootstrap name: Validate IAM bootstrap entry: ./bin/pre-commit/validate-iam-bootstrap.py diff --git a/bin/pre-commit/validate-iam-bootstrap.py b/bin/pre-commit/validate-iam-bootstrap.py index 09f2ea91..9e4a19df 100755 --- a/bin/pre-commit/validate-iam-bootstrap.py +++ b/bin/pre-commit/validate-iam-bootstrap.py @@ -6,7 +6,5 @@ for bootstrap in glob("modules/iam-bootstrap/bootstrap-*.json"): text = json.loads(Path(bootstrap).read_text()) - if len(json.dumps(text, separators=(",", ":"))) > 6000: - raise SystemExit( - f"{bootstrap} is over 6k characters, make sure it's under the IAM PolicySize quota (6144)" - ) + if len(json.dumps(text, separators=(",", ":"))) > 6050: + raise SystemExit(f"{bootstrap} is over 6k characters, make sure it's under the IAM PolicySize quota (6144)") diff --git a/modules/eks/README.md b/modules/eks/README.md index 35589775..9a9679b8 100644 --- a/modules/eks/README.md +++ b/modules/eks/README.md @@ -44,9 +44,9 @@ | [aws_security_group.eks_cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | | [aws_security_group.eks_nodes](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | | [aws_security_group_rule.bastion_eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource | -| [aws_security_group_rule.efs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource | | 
[aws_security_group_rule.eks_cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource | | [aws_security_group_rule.node](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource | +| [aws_security_group_rule.shared_storage](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource | | [null_resource.kubeconfig](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | | [terraform_data.run_k8s_pre_setup](https://registry.terraform.io/providers/hashicorp/terraform/latest/docs/resources/data) | resource | | [aws_caller_identity.aws_account](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | @@ -72,8 +72,9 @@ | [calico](#input\_calico) | calico = {
version = Configure the version for Calico
image\_registry = Configure the image registry for Calico
} |
object({
image_registry = optional(string, "quay.io")
version = optional(string, "v3.27.3")
})
| `{}` | no | | [create\_eks\_role\_arn](#input\_create\_eks\_role\_arn) | Role arn to assume during the EKS cluster creation. | `string` | n/a | yes | | [deploy\_id](#input\_deploy\_id) | Domino Deployment ID | `string` | n/a | yes | -| [efs\_security\_group](#input\_efs\_security\_group) | Security Group ID for EFS | `string` | n/a | yes | +| [efs\_security\_group](#input\_efs\_security\_group) | Security Group ID for EFS | `string` | `null` | no | | [eks](#input\_eks) | service\_ipv4\_cidr = CIDR for EKS cluster kubernetes\_network\_config.
creation\_role\_name = Name of the role to import.
k8s\_version = EKS cluster k8s version.
nodes\_master = Grants the nodes role system:masters access. NOT recommended
kubeconfig = {
extra\_args = Optional extra args when generating kubeconfig.
path = Fully qualified path name to write the kubeconfig file.
}
public\_access = {
enabled = Enable EKS API public endpoint.
cidrs = List of CIDR ranges permitted for accessing the EKS public endpoint.
}
Custom role maps for the aws-auth configmap
custom\_role\_maps = {
rolearn = string
username = string
groups = list(string)
}
master\_role\_names = IAM role names to be added as masters in eks.
cluster\_addons = EKS cluster addons. vpc-cni is installed separately.
vpc\_cni = Configuration for AWS VPC CNI
ssm\_log\_group\_name = CloudWatch log group to send the SSM session logs to.
identity\_providers = Configuration for IDP (Identity Provider).
} |
object({
service_ipv4_cidr = optional(string, "172.20.0.0/16")
creation_role_name = optional(string, null)
k8s_version = optional(string, "1.27")
nodes_master = optional(bool, false)
kubeconfig = optional(object({
extra_args = optional(string, "")
path = optional(string, null)
}), {})
public_access = optional(object({
enabled = optional(bool, false)
cidrs = optional(list(string), [])
}), {})
custom_role_maps = optional(list(object({
rolearn = string
username = string
groups = list(string)
})), [])
master_role_names = optional(list(string), [])
cluster_addons = optional(list(string), ["kube-proxy", "coredns", "vpc-cni"])
ssm_log_group_name = optional(string, "session-manager")
vpc_cni = optional(object({
prefix_delegation = optional(bool, false)
annotate_pod_ip = optional(bool, true)
}))
identity_providers = optional(list(object({
client_id = string
groups_claim = optional(string, null)
groups_prefix = optional(string, null)
identity_provider_config_name = string
issuer_url = optional(string, null)
required_claims = optional(string, null)
username_claim = optional(string, null)
username_prefix = optional(string, null)
})), []),
})
| `{}` | no | +| [fsx](#input\_fsx) | Configuration for FSX |
object({
astra_trident_operator_role = optional(string, null)
svm = optional(object({
id = optional(string, null)
management_ip = optional(string, null)
nfs_ip = optional(string, null)
}), null)
filesystem = optional(object({
id = optional(string, null)
security_group_id = optional(string, null)
}), null)
})
| `null` | no | | [ignore\_tags](#input\_ignore\_tags) | Tag keys to be ignored by the aws provider. | `list(string)` | `[]` | no | | [kms\_info](#input\_kms\_info) | key\_id = KMS key id.
key\_arn = KMS key arn.
enabled = KMS key is enabled |
object({
key_id = string
key_arn = string
enabled = bool
})
| n/a | yes | | [network\_info](#input\_network\_info) | id = VPC ID.
subnets = {
public = List of public Subnets.
[{
name = Subnet name.
subnet\_id = Subnet id
az = Subnet availability\_zone
az\_id = Subnet availability\_zone\_id
}]
private = List of private Subnets.
[{
name = Subnet name.
subnet\_id = Subnet id
az = Subnet availability\_zone
az\_id = Subnet availability\_zone\_id
}]
pod = List of pod Subnets.
[{
name = Subnet name.
subnet\_id = Subnet id
az = Subnet availability\_zone
az\_id = Subnet availability\_zone\_id
}]
} |
object({
vpc_id = string
subnets = object({
public = list(object({
name = string
subnet_id = string
az = string
az_id = string
}))
private = list(object({
name = string
subnet_id = string
az = string
az_id = string
}))
pod = list(object({
name = string
subnet_id = string
az = string
az_id = string
}))
})
vpc_cidrs = optional(string, "10.0.0.0/16")
})
| n/a | yes | diff --git a/modules/eks/node-group.tf b/modules/eks/node-group.tf index cd0f015a..3493fa10 100644 --- a/modules/eks/node-group.tf +++ b/modules/eks/node-group.tf @@ -51,12 +51,36 @@ resource "aws_security_group_rule" "node" { ) } -resource "aws_security_group_rule" "efs" { - security_group_id = var.efs_security_group - protocol = "tcp" - from_port = 2049 - to_port = 2049 - type = "ingress" - description = "EFS access" +moved { + from = aws_security_group_rule.efs + to = aws_security_group_rule.shared_storage["efs_2049_2049"] +} + +### FSX + +locals { + shared_storage_type = var.fsx != null ? "fsx" : "efs" + inbound_rules = local.shared_storage_type == "fsx" ? { + rules = [ + { protocol = "all", from_port = 0, to_port = 65535, description = "All traffic from EKS nodes." }, + ] + security_group_id = var.fsx.filesystem.security_group_id + } : { + rules = [ + { protocol = "tcp", from_port = 2049, to_port = 2049, description = "EFS access" } + ] + security_group_id = var.efs_security_group + } +} + +resource "aws_security_group_rule" "shared_storage" { + for_each = { for r in local.inbound_rules.rules : "${local.shared_storage_type}_${r.from_port}_${r.to_port}" => r } + + security_group_id = local.inbound_rules.security_group_id source_security_group_id = aws_security_group.eks_nodes.id + protocol = each.value.protocol + from_port = each.value.from_port + to_port = each.value.to_port + type = "ingress" + description = each.value.description } diff --git a/modules/eks/submodules/k8s/README.md b/modules/eks/submodules/k8s/README.md index c31aba67..053a7a7b 100644 --- a/modules/eks/submodules/k8s/README.md +++ b/modules/eks/submodules/k8s/README.md @@ -34,6 +34,7 @@ No modules. |------|-------------|------|---------|:--------:| | [bastion\_info](#input\_bastion\_info) | user = Bastion username.
public\_ip = Bastion public ip.
security\_group\_id = Bastion sg id.
ssh\_bastion\_command = Command to ssh onto bastion. |
object({
user = string
public_ip = string
security_group_id = string
ssh_bastion_command = string
})
| n/a | yes | | [eks\_info](#input\_eks\_info) | cluster = {
version = K8s version.
arn = EKS Cluster arn.
security\_group\_id = EKS Cluster security group id.
endpoint = EKS Cluster API endpoint.
roles = Default IAM Roles associated with the EKS cluster. {
name = string
arn = string
}
custom\_roles = Custom IAM Roles associated with the EKS cluster. {
rolearn = string
username = string
groups = list(string)
}
oidc = {
arn = OIDC provider ARN.
url = OIDC provider url.
}
}
nodes = {
security\_group\_id = EKS Nodes security group id.
roles = IAM Roles associated with the EKS Nodes.{
name = string
arn = string
}
}
kubeconfig = Kubeconfig details.{
path = string
extra\_args = string
}
calico = {
version = Configure the version for Calico
image\_registry = Configure the image registry for Calico
} |
object({
cluster = object({
version = string
arn = string
security_group_id = string
endpoint = string
roles = list(object({
name = string
arn = string
}))
custom_roles = list(object({
rolearn = string
username = string
groups = list(string)
}))
oidc = object({
arn = string
url = string
})
})
nodes = object({
nodes_master = bool
security_group_id = string
roles = list(object({
name = string
arn = string
}))
})
kubeconfig = object({
path = string
extra_args = string
})
calico = object({
version = string
image_registry = string
})
})
| n/a | yes | +| [fsx](#input\_fsx) | Configuration for FSx |
object({
astra_trident_operator_role = optional(string, null)
svm = optional(object({
id = optional(string, null)
management_ip = optional(string, null)
nfs_ip = optional(string, null)
}), null)
filesystem = optional(object({
id = optional(string, null)
}), null)
})
| `null` | no | | [ssh\_key](#input\_ssh\_key) | path = SSH private key filepath.
key\_pair\_name = AWS key\_pair name. |
object({
path = string
key_pair_name = string
})
| n/a | yes | | [use\_fips\_endpoint](#input\_use\_fips\_endpoint) | Use aws FIPS endpoints | `bool` | `false` | no | diff --git a/modules/eks/submodules/k8s/main.tf b/modules/eks/submodules/k8s/main.tf index 62fe0cb3..e56a120d 100644 --- a/modules/eks/submodules/k8s/main.tf +++ b/modules/eks/submodules/k8s/main.tf @@ -26,6 +26,7 @@ locals { calico_version = var.eks_info.calico.version calico_fips_mode = var.use_fips_endpoint ? "Enabled" : "Disabled" calico_image_registry = var.eks_info.calico.image_registry + fsx = var.fsx }) } diff --git a/modules/eks/submodules/k8s/variables.tf b/modules/eks/submodules/k8s/variables.tf index 51eaec3d..1ac46a2d 100644 --- a/modules/eks/submodules/k8s/variables.tf +++ b/modules/eks/submodules/k8s/variables.tf @@ -105,3 +105,19 @@ variable "use_fips_endpoint" { type = bool default = false } + +variable "fsx" { + description = "Configuration for FSx" + type = object({ + astra_trident_operator_role = optional(string, null) + svm = optional(object({ + id = optional(string, null) + management_ip = optional(string, null) + nfs_ip = optional(string, null) + }), null) + filesystem = optional(object({ + id = optional(string, null) + }), null) + }) + default = null +} diff --git a/modules/eks/variables.tf b/modules/eks/variables.tf index c974a766..abf39659 100644 --- a/modules/eks/variables.tf +++ b/modules/eks/variables.tf @@ -88,6 +88,7 @@ variable "node_iam_policies" { variable "efs_security_group" { description = "Security Group ID for EFS" type = string + default = null } variable "bastion_info" { @@ -273,3 +274,21 @@ variable "calico" { default = {} } + + +variable "fsx" { + description = "Configuration for FSX" + type = object({ + astra_trident_operator_role = optional(string, null) + svm = optional(object({ + id = optional(string, null) + management_ip = optional(string, null) + nfs_ip = optional(string, null) + }), null) + filesystem = optional(object({ + id = optional(string, null) + security_group_id = optional(string, null) + }), null) + }) + default = null +} diff --git a/modules/iam-bootstrap/bootstrap-1.json b/modules/iam-bootstrap/bootstrap-1.json index b493da4c..b5294f32 100644 --- a/modules/iam-bootstrap/bootstrap-1.json +++ b/modules/iam-bootstrap/bootstrap-1.json @@ -124,6 +124,7 @@ "ec2:RevokeSecurityGroupEgress", "ec2:RevokeSecurityGroupIngress", "ec2:RunInstances", + "ec2:UpdateSecurityGroupRuleDescriptions*", "ssm:GetParameter" ], "Resource": ["*"] @@ -146,6 +147,8 @@ "Effect": "Allow", "Action": [ "kms:CreateGrant", + "kms:ListGrants", + "kms:RevokeGrant", "kms:CreateAlias", "kms:CreateKey", "kms:DescribeKey", diff --git a/modules/iam-bootstrap/bootstrap-2.json b/modules/iam-bootstrap/bootstrap-2.json index 52573bf7..a203c3e9 100644 --- a/modules/iam-bootstrap/bootstrap-2.json +++ b/modules/iam-bootstrap/bootstrap-2.json @@ -73,6 +73,22 @@ "ecr:DescribePullThroughCacheRules" ], "Resource": ["*"] + }, + { + "Sid": "FSXUngated", + "Effect": "Allow", + "Action": [ + "fsx:*" + ], + "Resource": ["*"] + }, + { + "Sid": "SecretsManagerUngated", + "Effect": "Allow", + "Action": [ + "secretsmanager:*" + ], + "Resource": ["*"] } ] } diff --git a/modules/infra/README.md b/modules/infra/README.md index c1870e87..9459496c 100644 --- a/modules/infra/README.md +++ b/modules/infra/README.md @@ -63,7 +63,7 @@ | [network](#input\_network) | vpc = {
id = Existing vpc id, it will bypass creation by this module.
subnets = {
private = Existing private subnets.
public = Existing public subnets.
pod = Existing pod subnets.
}), {})
}), {})
network\_bits = {
public = Number of network bits to allocate to the public subnet. i.e /27 -> 32 IPs.
private = Number of network bits to allocate to the private subnet. i.e /19 -> 8,192 IPs.
pod = Number of network bits to allocate to the pod subnet. i.e /19 -> 8,192 IPs.
}
cidrs = {
vpc = The IPv4 CIDR block for the VPC.
pod = The IPv4 CIDR block for the Pod subnets.
}
use\_pod\_cidr = Use additional pod CIDR range (ie 100.64.0.0/16) for pod networking. |
object({
vpc = optional(object({
id = optional(string, null)
subnets = optional(object({
private = optional(list(string), [])
public = optional(list(string), [])
pod = optional(list(string), [])
}), {})
}), {})
network_bits = optional(object({
public = optional(number, 27)
private = optional(number, 19)
pod = optional(number, 19)
}
), {})
cidrs = optional(object({
vpc = optional(string, "10.0.0.0/16")
pod = optional(string, "100.64.0.0/16")
}), {})
use_pod_cidr = optional(bool, true)
})
| `{}` | no | | [region](#input\_region) | AWS region for the deployment | `string` | n/a | yes | | [ssh\_pvt\_key\_path](#input\_ssh\_pvt\_key\_path) | SSH private key filepath. | `string` | n/a | yes | -| [storage](#input\_storage) | storage = {
efs = {
access\_point\_path = Filesystem path for efs.
backup\_vault = {
create = Create backup vault for EFS toggle.
force\_destroy = Toggle to allow automatic destruction of all backups when destroying.
backup = {
schedule = Cron-style schedule for EFS backup vault (default: once a day at 12pm).
cold\_storage\_after = Move backup data to cold storage after this many days.
delete\_after = Delete backup data after this many days.
}
}
}
s3 = {
force\_destroy\_on\_deletion = Toogle to allow recursive deletion of all objects in the s3 buckets. if 'false' terraform will NOT be able to delete non-empty buckets.
}
ecr = {
force\_destroy\_on\_deletion = Toogle to allow recursive deletion of all objects in the ECR repositories. if 'false' terraform will NOT be able to delete non-empty repositories.
}
enable\_remote\_backup = Enable tagging required for cross-account backups
costs\_enabled = Determines whether to provision domino cost related infrastructures, ie, long term storage
}
} |
object({
efs = optional(object({
access_point_path = optional(string, "/domino")
backup_vault = optional(object({
create = optional(bool, true)
force_destroy = optional(bool, true)
backup = optional(object({
schedule = optional(string, "0 12 * * ? *")
cold_storage_after = optional(number, 35)
delete_after = optional(number, 125)
}), {})
}), {})
}), {})
s3 = optional(object({
force_destroy_on_deletion = optional(bool, true)
}), {})
ecr = optional(object({
force_destroy_on_deletion = optional(bool, true)
}), {}),
enable_remote_backup = optional(bool, false)
costs_enabled = optional(bool, true)
})
| `{}` | no | +| [storage](#input\_storage) | storage = {
filesystem\_type = File system type (fsx\|efs)
efs = {
access\_point\_path = Filesystem path for efs.
backup\_vault = {
create = Create backup vault for EFS toggle.
force\_destroy = Toggle to allow automatic destruction of all backups when destroying.
backup = {
schedule = Cron-style schedule for EFS backup vault (default: once a day at 12pm).
cold\_storage\_after = Move backup data to cold storage after this many days.
delete\_after = Delete backup data after this many days.
}
}
}
fsx = {
deployment\_type = FSx ONTAP deployment type ('MULTI\_AZ\_1', 'MULTI\_AZ\_2', 'SINGLE\_AZ\_1', 'SINGLE\_AZ\_2')
storage\_capacity = Filesystem storage capacity
throughput\_capacity = Filesystem throughput capacity
automatic\_backup\_retention\_days = How many days to keep backups
daily\_automatic\_backup\_start\_time = Start time in 'HH:MM' format to initiate backups
}
s3 = {
force\_destroy\_on\_deletion = Toggle to allow recursive deletion of all objects in the s3 buckets. If 'false' terraform will NOT be able to delete non-empty buckets.
}
ecr = {
force\_destroy\_on\_deletion = Toggle to allow recursive deletion of all objects in the ECR repositories. If 'false' terraform will NOT be able to delete non-empty repositories.
}
enable\_remote\_backup = Enable tagging required for cross-account backups
costs\_enabled = Determines whether to provision Domino cost-related infrastructure, i.e., long-term storage
}
} |
object({
filesystem_type = optional(string, "efs")
efs = optional(object({
access_point_path = optional(string, "/domino")
backup_vault = optional(object({
create = optional(bool, true)
force_destroy = optional(bool, true)
backup = optional(object({
schedule = optional(string, "0 12 * * ? *")
cold_storage_after = optional(number, 35)
delete_after = optional(number, 125)
}), {})
}), {})
}), {})
fsx = optional(object({
deployment_type = optional(string, "SINGLE_AZ_2")
storage_capacity = optional(number, 1024)
throughput_capacity = optional(number, 1536)
automatic_backup_retention_days = optional(number, 90)
daily_automatic_backup_start_time = optional(string, "00:00")
}), {})
s3 = optional(object({
force_destroy_on_deletion = optional(bool, true)
}), {})
ecr = optional(object({
force_destroy_on_deletion = optional(bool, true)
}), {}),
enable_remote_backup = optional(bool, false)
costs_enabled = optional(bool, true)
})
| `{}` | no | | [tags](#input\_tags) | Deployment tags. | `map(string)` | `{}` | no | | [use\_fips\_endpoint](#input\_use\_fips\_endpoint) | Use aws FIPS endpoints | `bool` | `false` | no | diff --git a/modules/infra/kms.tf b/modules/infra/kms.tf index e91a9d84..6ffd5762 100644 --- a/modules/infra/kms.tf +++ b/modules/infra/kms.tf @@ -2,6 +2,7 @@ locals { aws_account_id = data.aws_caller_identity.aws_account.account_id } + data "aws_iam_policy_document" "kms_key_global" { count = local.create_kms_key @@ -34,7 +35,7 @@ data "aws_iam_policy_document" "kms_key_global" { type = "AWS" identifiers = [ "arn:${data.aws_partition.current.partition}:iam::${local.aws_account_id}:root", - "arn:${data.aws_partition.current.partition}:iam::${local.aws_account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling" + "arn:${data.aws_partition.current.partition}:iam::${local.aws_account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", ] } } @@ -54,6 +55,7 @@ data "aws_iam_policy_document" "kms_key_global" { type = "AWS" identifiers = ["arn:${data.aws_partition.current.partition}:iam::${local.aws_account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling"] } + } statement { diff --git a/modules/infra/outputs.tf b/modules/infra/outputs.tf index 5d7d1dea..5d5a5d43 100644 --- a/modules/infra/outputs.tf +++ b/modules/infra/outputs.tf @@ -65,7 +65,7 @@ output "default_node_groups" { output "efs_security_group" { description = "Security Group ID for EFS" - value = module.storage.info.efs.security_group_id + value = var.storage.filesystem_type == "efs" ? module.storage.info.efs.security_group_id : null } output "node_iam_policies" { diff --git a/modules/infra/submodules/storage/README.md b/modules/infra/submodules/storage/README.md index 9fafc1a7..f0f05c3e 100644 --- a/modules/infra/submodules/storage/README.md +++ b/modules/infra/submodules/storage/README.md @@ -7,12 +7,14 @@ |------|---------| | [terraform](#requirement\_terraform) | >= 1.0 | | [aws](#requirement\_aws) | ~> 5 | +| [random](#requirement\_random) | >= 3.6.2 | ## Providers | Name | Version | |------|---------| | [aws](#provider\_aws) | ~> 5 | +| [random](#provider\_random) | >= 3.6.2 | | [terraform](#provider\_terraform) | n/a | ## Modules @@ -31,6 +33,8 @@ No modules. | [aws_efs_access_point.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/efs_access_point) | resource | | [aws_efs_file_system.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/efs_file_system) | resource | | [aws_efs_mount_target.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/efs_mount_target) | resource | +| [aws_fsx_ontap_file_system.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/fsx_ontap_file_system) | resource | +| [aws_fsx_ontap_storage_virtual_machine.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/fsx_ontap_storage_virtual_machine) | resource | | [aws_iam_policy.ecr](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | | [aws_iam_policy.s3](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | | [aws_iam_role.efs_backup_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | @@ -48,7 +52,12 @@ No modules. 
| [aws_s3_bucket_server_side_encryption_configuration.buckets_encryption](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket_server_side_encryption_configuration) | resource | | [aws_s3_bucket_server_side_encryption_configuration.monitoring](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket_server_side_encryption_configuration) | resource | | [aws_s3_bucket_versioning.buckets_versioning](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket_versioning) | resource | +| [aws_secretsmanager_secret.fsx](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/secretsmanager_secret) | resource | +| [aws_secretsmanager_secret_version.fsx](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/secretsmanager_secret_version) | resource | | [aws_security_group.efs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | +| [aws_security_group.fsx](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | +| [aws_security_group_rule.fsx_outbound](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource | +| [random_password.fsx](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/password) | resource | | [terraform_data.check_backup_role](https://registry.terraform.io/providers/hashicorp/terraform/latest/docs/resources/data) | resource | | [terraform_data.set_monitoring_private_acl](https://registry.terraform.io/providers/hashicorp/terraform/latest/docs/resources/data) | resource | | [aws_caller_identity.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | @@ -64,6 +73,7 @@ No modules. | [aws_iam_policy_document.registry](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | | [aws_iam_policy_document.s3](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | | [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source | +| [aws_secretsmanager_secret_version.fsx_creds](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/secretsmanager_secret_version) | data source | ## Inputs @@ -73,7 +83,7 @@ No modules. | [kms\_info](#input\_kms\_info) | key\_id = KMS key id.
key\_arn = KMS key arn.
enabled = KMS key is enabled |
object({
key_id = string
key_arn = string
enabled = bool
})
| n/a | yes | | [network\_info](#input\_network\_info) | id = VPC ID.
subnets = {
public = List of public Subnets.
[{
name = Subnet name.
subnet\_id = Subnet id
az = Subnet availability\_zone
az\_id = Subnet availability\_zone\_id
}]
private = List of private Subnets.
[{
name = Subnet name.
subnet\_id = Subnet id
az = Subnet availability\_zone
az\_id = Subnet availability\_zone\_id
}]
pod = List of pod Subnets.
[{
name = Subnet name.
subnet\_id = Subnet id
az = Subnet availability\_zone
az\_id = Subnet availability\_zone\_id
}]
} |
object({
vpc_id = string
subnets = object({
public = optional(list(object({
name = string
subnet_id = string
az = string
az_id = string
})), [])
private = list(object({
name = string
subnet_id = string
az = string
az_id = string
}))
pod = optional(list(object({
name = string
subnet_id = string
az = string
az_id = string
})), [])
})
})
| n/a | yes | | [region](#input\_region) | AWS region for the deployment | `string` | n/a | yes | -| [storage](#input\_storage) | storage = {
efs = {
access\_point\_path = Filesystem path for efs.
backup\_vault = {
create = Create backup vault for EFS toggle.
force\_destroy = Toggle to allow automatic destruction of all backups when destroying.
backup = {
schedule = Cron-style schedule for EFS backup vault (default: once a day at 12pm).
cold\_storage\_after = Move backup data to cold storage after this many days.
delete\_after = Delete backup data after this many days.
}
}
}
s3 = {
force\_destroy\_on\_deletion = Toogle to allow recursive deletion of all objects in the s3 buckets. if 'false' terraform will NOT be able to delete non-empty buckets.
}
ecr = {
force\_destroy\_on\_deletion = Toogle to allow recursive deletion of all objects in the ECR repositories. if 'false' terraform will NOT be able to delete non-empty repositories.
}
enable\_remote\_backup = Enable tagging required for cross-account backups
costs\_enabled = Determines whether to provision domino cost related infrastructures, ie, long term storage
}
} |
object({
efs = optional(object({
access_point_path = optional(string)
backup_vault = optional(object({
create = optional(bool)
force_destroy = optional(bool)
backup = optional(object({
schedule = optional(string)
cold_storage_after = optional(number)
delete_after = optional(number)
}))
}))
}))
s3 = optional(object({
force_destroy_on_deletion = optional(bool)
}))
ecr = optional(object({
force_destroy_on_deletion = optional(bool)
}))
enable_remote_backup = optional(bool)
costs_enabled = optional(bool)
})
| n/a | yes | +| [storage](#input\_storage) | storage = {
filesystem\_type = File system type (fsx\|efs)
efs = {
access\_point\_path = Filesystem path for efs.
backup\_vault = {
create = Create backup vault for EFS toggle.
force\_destroy = Toggle to allow automatic destruction of all backups when destroying.
backup = {
schedule = Cron-style schedule for EFS backup vault (default: once a day at 12pm).
cold\_storage\_after = Move backup data to cold storage after this many days.
delete\_after = Delete backup data after this many days.
}
}
}
fsx = {
deployment\_type = FSx ONTAP deployment type ('MULTI\_AZ\_1', 'MULTI\_AZ\_2', 'SINGLE\_AZ\_1', 'SINGLE\_AZ\_2')
storage\_capacity = Filesystem storage capacity
throughput\_capacity = Filesystem throughput capacity
automatic\_backup\_retention\_days = How many days to keep backups
daily\_automatic\_backup\_start\_time = Start time in 'HH:MM' format to initiate backups
}
s3 = {
force\_destroy\_on\_deletion = Toggle to allow recursive deletion of all objects in the s3 buckets. If 'false' terraform will NOT be able to delete non-empty buckets.
}
ecr = {
force\_destroy\_on\_deletion = Toggle to allow recursive deletion of all objects in the ECR repositories. If 'false' terraform will NOT be able to delete non-empty repositories.
}
enable\_remote\_backup = Enable tagging required for cross-account backups
costs\_enabled = Determines whether to provision Domino cost-related infrastructure, i.e., long-term storage
}
} |
object({
filesystem_type = string
efs = optional(object({
access_point_path = optional(string)
backup_vault = optional(object({
create = optional(bool)
force_destroy = optional(bool)
backup = optional(object({
schedule = optional(string)
cold_storage_after = optional(number)
delete_after = optional(number)
}))
}))
}))
fsx = optional(object({
deployment_type = optional(string)
storage_capacity = optional(number)
throughput_capacity = optional(number)
automatic_backup_retention_days = optional(number)
daily_automatic_backup_start_time = optional(string)
}))
s3 = optional(object({
force_destroy_on_deletion = optional(bool)
}))
ecr = optional(object({
force_destroy_on_deletion = optional(bool)
}))
enable_remote_backup = optional(bool)
costs_enabled = optional(bool)
})
| n/a | yes | | [use\_fips\_endpoint](#input\_use\_fips\_endpoint) | Use aws FIPS endpoints | `bool` | `false` | no | ## Outputs diff --git a/modules/infra/submodules/storage/efs.tf b/modules/infra/submodules/storage/efs.tf index b4e5aa54..ee743162 100644 --- a/modules/infra/submodules/storage/efs.tf +++ b/modules/infra/submodules/storage/efs.tf @@ -1,4 +1,5 @@ resource "aws_efs_file_system" "eks" { + count = local.deploy_efs ? 1 : 0 encrypted = true performance_mode = "generalPurpose" provisioned_throughput_in_mibps = "0" @@ -14,10 +15,10 @@ resource "aws_efs_file_system" "eks" { kms_key_id, ] } - } resource "aws_security_group" "efs" { + count = local.deploy_efs ? 1 : 0 name = "${var.deploy_id}-efs" description = "EFS security group" vpc_id = var.network_info.vpc_id @@ -31,14 +32,15 @@ resource "aws_security_group" "efs" { } resource "aws_efs_mount_target" "eks" { - count = length(local.private_subnet_ids) - file_system_id = aws_efs_file_system.eks.id - security_groups = [aws_security_group.efs.id] + count = local.deploy_efs ? length(local.private_subnet_ids) : 0 + file_system_id = aws_efs_file_system.eks[0].id + security_groups = [aws_security_group.efs[0].id] subnet_id = element(local.private_subnet_ids, count.index) } resource "aws_efs_access_point" "eks" { - file_system_id = aws_efs_file_system.eks.id + count = local.deploy_efs ? 1 : 0 + file_system_id = aws_efs_file_system.eks[0].id posix_user { gid = "0" @@ -55,3 +57,23 @@ resource "aws_efs_access_point" "eks" { path = var.storage.efs.access_point_path } } + +moved { + from = aws_efs_file_system.eks + to = aws_efs_file_system.eks[0] +} + +moved { + from = aws_security_group.efs + to = aws_security_group.efs[0] +} + +moved { + from = aws_efs_mount_target.eks + to = aws_efs_mount_target.eks[0] +} + +moved { + from = aws_efs_access_point.eks + to = aws_efs_access_point.eks[0] +} diff --git a/modules/infra/submodules/storage/efs_backup_vault.tf b/modules/infra/submodules/storage/efs_backup_vault.tf index 7699c97c..c97da9b4 100644 --- a/modules/infra/submodules/storage/efs_backup_vault.tf +++ b/modules/infra/submodules/storage/efs_backup_vault.tf @@ -1,5 +1,9 @@ +locals { + efs_backup_count = local.deploy_efs && var.storage.efs.backup_vault.create ? 1 : 0 +} + resource "aws_backup_vault" "efs" { - count = var.storage.efs.backup_vault.create ? 1 : 0 + count = local.efs_backup_count name = "${var.deploy_id}-efs" force_destroy = var.storage.efs.backup_vault.force_destroy @@ -15,7 +19,7 @@ resource "aws_backup_vault" "efs" { } resource "aws_backup_plan" "efs" { - count = var.storage.efs.backup_vault.create ? 1 : 0 + count = local.efs_backup_count name = "${var.deploy_id}-efs" rule { rule_name = "efs-rule" @@ -32,12 +36,12 @@ resource "aws_backup_plan" "efs" { } data "aws_iam_policy" "aws_backup_role_policy" { - count = var.storage.efs.backup_vault.create ? 1 : 0 + count = local.efs_backup_count name = "AWSBackupServiceRolePolicyForBackup" } resource "aws_iam_role" "efs_backup_role" { - count = var.storage.efs.backup_vault.create ? 1 : 0 + count = local.efs_backup_count name = "${var.deploy_id}-efs-backup" assume_role_policy = jsonencode({ Version = "2012-10-17" @@ -55,13 +59,13 @@ resource "aws_iam_role" "efs_backup_role" { } resource "aws_iam_role_policy_attachment" "efs_backup_role_attach" { - count = var.storage.efs.backup_vault.create ? 
1 : 0 + count = local.efs_backup_count role = aws_iam_role.efs_backup_role[0].name policy_arn = data.aws_iam_policy.aws_backup_role_policy[0].arn } resource "terraform_data" "check_backup_role" { - count = var.storage.efs.backup_vault.create ? 1 : 0 + count = local.efs_backup_count provisioner "local-exec" { command = <<-EOF @@ -103,12 +107,12 @@ resource "terraform_data" "check_backup_role" { } resource "aws_backup_selection" "efs" { - count = var.storage.efs.backup_vault.create ? 1 : 0 + count = local.efs_backup_count name = "${var.deploy_id}-efs" plan_id = aws_backup_plan.efs[0].id iam_role_arn = aws_iam_role.efs_backup_role[0].arn - resources = [aws_efs_file_system.eks.arn] + resources = [aws_efs_file_system.eks[0].arn] depends_on = [terraform_data.check_backup_role] } diff --git a/modules/infra/submodules/storage/fsx.tf b/modules/infra/submodules/storage/fsx.tf new file mode 100644 index 00000000..d5b14e2d --- /dev/null +++ b/modules/infra/submodules/storage/fsx.tf @@ -0,0 +1,109 @@ +locals { + fsx_subnet_ids = startswith(var.storage.fsx.deployment_type, "MULTI") ? slice(local.private_subnet_ids, 0, 2) : [local.private_subnet_ids[0]] +} + +resource "aws_security_group" "fsx" { + count = local.deploy_fsx ? 1 : 0 + name = "${var.deploy_id}-fsx" + description = "FSx security group" + vpc_id = var.network_info.vpc_id + + lifecycle { + create_before_destroy = true + } + + tags = { + "Name" = "${var.deploy_id}-fsx" + } +} + +resource "aws_security_group_rule" "fsx_outbound" { + count = local.deploy_fsx ? 1 : 0 + type = "egress" + from_port = 0 + to_port = 0 + protocol = "-1" + security_group_id = aws_security_group.fsx[0].id + cidr_blocks = ["0.0.0.0/0"] + description = "FSX outbound" # https://docs.netapp.com/us-en/bluexp-fsx-ontap/requirements/reference-security-groups-fsx.html#rules-for-fsx-for-ontap +} + +locals { + fsx_ontap_components_user = local.deploy_fsx ? { + filesystem = "fsxadmin" + svm = "vsadmin" + } : {} +} + +resource "random_password" "fsx" { + for_each = local.fsx_ontap_components_user + length = 16 + special = false + min_numeric = 1 + min_upper = 1 + min_lower = 1 +} + +resource "aws_secretsmanager_secret" "fsx" { + for_each = local.fsx_ontap_components_user + name = "${var.deploy_id}-fsx-ontap-${each.key}" + description = "Credentials for ONTAP ${each.key}" + recovery_window_in_days = 0 +} + +resource "aws_secretsmanager_secret_version" "fsx" { + for_each = local.fsx_ontap_components_user + secret_id = aws_secretsmanager_secret.fsx[each.key].id + secret_string = jsonencode({ + username = each.value + password = random_password.fsx[each.key].result + }) +} + + +data "aws_secretsmanager_secret_version" "fsx_creds" { + for_each = local.fsx_ontap_components_user + secret_id = aws_secretsmanager_secret.fsx[each.key].id + depends_on = [aws_secretsmanager_secret_version.fsx] +} + + +resource "aws_fsx_ontap_file_system" "eks" { + count = local.deploy_fsx ? 
1 : 0 + storage_capacity = var.storage.fsx.storage_capacity + subnet_ids = local.fsx_subnet_ids + deployment_type = var.storage.fsx.deployment_type + preferred_subnet_id = local.fsx_subnet_ids[0] + security_group_ids = [aws_security_group.fsx[0].id] + kms_key_id = local.kms_key_arn + fsx_admin_password = jsondecode(data.aws_secretsmanager_secret_version.fsx_creds["filesystem"].secret_string)["password"] + throughput_capacity = var.storage.fsx.throughput_capacity + automatic_backup_retention_days = var.storage.fsx.automatic_backup_retention_days + daily_automatic_backup_start_time = var.storage.fsx.daily_automatic_backup_start_time + + + + lifecycle { + create_before_destroy = true + ignore_changes = [throughput_capacity] ## TODO: will keep trying to update ~ throughput_capacity = 0 -> 1536 + } + + tags = { + "Name" = var.deploy_id + "Backup" = "true" + } + + depends_on = [aws_secretsmanager_secret_version.fsx, aws_secretsmanager_secret.fsx] +} + +resource "aws_fsx_ontap_storage_virtual_machine" "eks" { + count = local.deploy_fsx ? 1 : 0 + file_system_id = aws_fsx_ontap_file_system.eks[0].id + name = "${var.deploy_id}-svm" + root_volume_security_style = "UNIX" + svm_admin_password = random_password.fsx["svm"].result + + tags = { + "Name" = "${var.deploy_id}-svm" + } +} diff --git a/modules/infra/submodules/storage/main.tf b/modules/infra/submodules/storage/main.tf index 8fd1f50d..f79eb7cb 100644 --- a/modules/infra/submodules/storage/main.tf +++ b/modules/infra/submodules/storage/main.tf @@ -5,6 +5,8 @@ data "aws_caller_identity" "this" {} locals { private_subnet_ids = var.network_info.subnets.private[*].subnet_id kms_key_arn = var.kms_info.enabled ? var.kms_info.key_arn : null + deploy_efs = var.storage.filesystem_type == "efs" + deploy_fsx = var.storage.filesystem_type == "fsx" s3_buckets = { for k, v in { backups = { diff --git a/modules/infra/submodules/storage/outputs.tf b/modules/infra/submodules/storage/outputs.tf index 9324b15b..2348a577 100644 --- a/modules/infra/submodules/storage/outputs.tf +++ b/modules/infra/submodules/storage/outputs.tf @@ -16,11 +16,20 @@ output "info" { } EOF value = { - efs = { - access_point = aws_efs_access_point.eks - file_system = aws_efs_file_system.eks - security_group_id = aws_security_group.efs.id - } + efs = local.deploy_efs ? { + access_point = aws_efs_access_point.eks[0] + file_system = aws_efs_file_system.eks[0] + security_group_id = aws_security_group.efs[0].id + } : null + fsx = local.deploy_fsx ? { + svm = { + name = aws_fsx_ontap_storage_virtual_machine.eks[0].name + management_ip = one(aws_fsx_ontap_storage_virtual_machine.eks[0].endpoints[0].management[0].ip_addresses) + nfs_ip = one(aws_fsx_ontap_storage_virtual_machine.eks[0].endpoints[0].nfs[0].ip_addresses) + creds_secret_arn = aws_secretsmanager_secret.fsx["svm"].arn + } + filesystem = { id = aws_fsx_ontap_file_system.eks[0].id, security_group_id = aws_security_group.fsx[0].id } + } : null s3 = { buckets = { for k, b in local.s3_buckets : k => { "bucket_name" = b.bucket_name, diff --git a/modules/infra/submodules/storage/variables.tf b/modules/infra/submodules/storage/variables.tf index 79dd9c59..b5a284c7 100644 --- a/modules/infra/submodules/storage/variables.tf +++ b/modules/infra/submodules/storage/variables.tf @@ -34,6 +34,7 @@ variable "kms_info" { variable "storage" { description = < [additional\_irsa\_configs](#input\_additional\_irsa\_configs) | Input for additional irsa configurations |
list(object({
name = string
namespace = string
serviceaccount_name = string
policy = string #json
}))
| `[]` | no | | [eks\_info](#input\_eks\_info) | cluster = {
specs {
name = Cluster name.
account\_id = AWS account id where the cluster resides.
}
oidc = {
arn = OIDC provider ARN.
url = OIDC provider url.
cert = {
thumbprint\_list = OIDC cert thumbprints.
url = OIDC cert URL.
}
} |
object({
nodes = object({
roles = list(object({
arn = string
name = string
}))
})
cluster = object({
specs = object({
name = string
account_id = string
})
oidc = object({
arn = string
url = string
cert = object({
thumbprint_list = list(string)
url = string
})
})
})
})
| n/a | yes | | [external\_dns](#input\_external\_dns) | Config to enable irsa for external-dns |
object({
enabled = optional(bool, false)
hosted_zone_name = optional(string, null)
hosted_zone_private = optional(string, false)
namespace = optional(string, "domino-platform")
serviceaccount_name = optional(string, "external-dns")
rm_role_policy = optional(object({
remove = optional(bool, false)
detach_from_role = optional(bool, false)
policy_name = optional(string, "")
}), {})
})
| `{}` | no | +| [netapp\_trident\_operator](#input\_netapp\_trident\_operator) | Config to create IRSA role for the netapp-trident-operator. |
object({
enabled = optional(bool, false)
namespace = optional(string, "trident")
serviceaccount_name = optional(string, "trident-controller")
region = optional(string)
})
| `{}` | no | | [use\_cluster\_odc\_idp](#input\_use\_cluster\_odc\_idp) | Toggle to use the OIDC IdP connector in the trust policy.
Set to `true` if the cluster and the hosted zone are in different AWS accounts.
`rm_role_policy` used to facilitate the cleanup if a node-attached policy was used previously. | `bool` | `true` | no | | [use\_fips\_endpoint](#input\_use\_fips\_endpoint) | Use aws FIPS endpoints | `bool` | `false` | no | @@ -57,5 +62,6 @@ No modules. | Name | Description | |------|-------------| | [external\_dns](#output\_external\_dns) | External\_dns info | +| [netapp\_trident\_operator](#output\_netapp\_trident\_operator) | NetApp Astra Trident FSX Operator role info | | [roles](#output\_roles) | Roles mapping info | diff --git a/modules/irsa/netapp-trident-operator.tf b/modules/irsa/netapp-trident-operator.tf new file mode 100644 index 00000000..0dae5405 --- /dev/null +++ b/modules/irsa/netapp-trident-operator.tf @@ -0,0 +1,67 @@ +data "aws_iam_policy_document" "trident_operator" { + count = var.netapp_trident_operator.enabled ? 1 : 0 + + statement { + effect = "Allow" + resources = ["arn:${data.aws_partition.current.partition}:secretsmanager:${var.netapp_trident_operator.region}:${data.aws_caller_identity.aws_account.account_id}:secret:${local.name_prefix}-fsx-ontap-*"] + + actions = [ + "secretsmanager:GetSecretValue" + ] + } + + statement { + effect = "Allow" + + resources = ["*"] + + actions = [ + "fsx:DescribeFileSystems", + "fsx:DescribeVolumes", + "fsx:CreateVolume", + "fsx:RestoreVolumeFromSnapshot", + "fsx:DescribeStorageVirtualMachines", + "fsx:UntagResource", + "fsx:UpdateVolume", + "fsx:TagResource", + "fsx:DeleteVolume" + ] + } +} + +resource "aws_iam_policy" "trident_operator" { + count = var.netapp_trident_operator.enabled ? 1 : 0 + name = "${local.name_prefix}-fsx-policy" + description = "Policy for FSx operations and Secrets Manager access" + + policy = data.aws_iam_policy_document.trident_operator[0].json +} + +resource "aws_iam_role" "trident_operator" { + count = var.netapp_trident_operator.enabled ? 1 : 0 + + name = "${local.name_prefix}-trident-operator" + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Principal = { + Federated = local.oidc_provider_arn + } + Action = "sts:AssumeRoleWithWebIdentity" + Condition : { + StringEquals : { + "${trimprefix(local.oidc_provider_url, "https://")}:sub" : "system:serviceaccount:${var.netapp_trident_operator.namespace}:${var.netapp_trident_operator.serviceaccount_name}" + } + } + } + ] + }) +} + +resource "aws_iam_role_policy_attachment" "trident_operator" { + count = var.netapp_trident_operator.enabled ? 1 : 0 + role = aws_iam_role.trident_operator[0].name + policy_arn = aws_iam_policy.trident_operator[0].arn +} diff --git a/modules/irsa/outputs.tf b/modules/irsa/outputs.tf index db7d33d1..4d0c4344 100644 --- a/modules/irsa/outputs.tf +++ b/modules/irsa/outputs.tf @@ -12,3 +12,10 @@ output "external_dns" { external_dns_use_eks_idp = var.use_cluster_odc_idp } : null } + +output "netapp_trident_operator" { + description = "NetApp Astra Trident FSX Operator role info" + value = var.netapp_trident_operator.enabled ? { + irsa_role = aws_iam_role.trident_operator[0].arn + } : null +} diff --git a/modules/irsa/variables.tf b/modules/irsa/variables.tf index 0b48958e..a0be1022 100644 --- a/modules/irsa/variables.tf +++ b/modules/irsa/variables.tf @@ -93,3 +93,17 @@ variable "use_fips_endpoint" { type = bool default = false } + + +variable "netapp_trident_operator" { + description = "Config to create IRSA role for the netapp-trident-operator."
+ + type = object({ + enabled = optional(bool, false) + namespace = optional(string, "trident") + serviceaccount_name = optional(string, "trident-controller") + region = optional(string) + }) + + default = {} +}
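
Reviewer note: the examples below are sketches for context, not part of the patch. The new FSx ONTAP path is opt-in via `storage.filesystem_type` (the default stays `"efs"`). A minimal sketch of the input, using the defaults declared in this diff; the surrounding root module and its other required inputs are assumed, not shown here:

```hcl
# Sketch (tfvars-style). Values mirror the defaults added in this diff;
# only filesystem_type is strictly needed to switch storage backends.
storage = {
  filesystem_type = "fsx" # "efs" (default) | "fsx"

  fsx = {
    deployment_type                   = "SINGLE_AZ_2" # or MULTI_AZ_1/2, SINGLE_AZ_1
    storage_capacity                  = 1024          # GiB
    throughput_capacity               = 1536          # MBps
    automatic_backup_retention_days   = 90
    daily_automatic_backup_start_time = "00:00"       # 'HH:MM'
  }
}
```

With `filesystem_type = "fsx"`, the storage submodule skips every EFS resource (`local.deploy_efs` is false) and the infra module's `efs_security_group` output resolves to `null`, which is why `var.efs_security_group` on the eks module is now optional.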
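The `moved` block in modules/eks/node-group.tf only lines up if the `for_each` key for the old EFS rule is exactly `efs_2049_2049`. A standalone sketch of the key derivation (same expression as the diff, evaluable in `terraform console`):

```hcl
# Reproduces the for_each key logic of aws_security_group_rule.shared_storage
# in isolation, for the EFS case (var.fsx == null).
locals {
  shared_storage_type = "efs" # becomes "fsx" when var.fsx != null

  rules = [
    { protocol = "tcp", from_port = 2049, to_port = 2049, description = "EFS access" },
  ]

  # => { "efs_2049_2049" = { ... } }
  keyed = { for r in local.rules : "${local.shared_storage_type}_${r.from_port}_${r.to_port}" => r }
}
```

Existing EFS deployments therefore refactor in place via `moved { to = aws_security_group_rule.shared_storage["efs_2049_2049"] }`, while FSx deployments create a new rule under the key `fsx_0_65535`.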
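On the IRSA side, the Astra Trident operator role is likewise opt-in. A sketch of a module call, assuming a local module path and an `eks_info`-shaped value wired in from elsewhere (neither is shown in this diff):

```hcl
# Sketch only: the source path and eks_info wiring are assumptions.
module "irsa" {
  source = "./modules/irsa"

  eks_info = local.eks_info # object shape per modules/irsa/variables.tf

  netapp_trident_operator = {
    enabled = true        # default is false; creates the role plus the fsx/secretsmanager policy
    region  = "us-west-2" # hypothetical; scopes the ${local.name_prefix}-fsx-ontap-* secret ARN
  }
}
```

The resulting role trusts the cluster OIDC provider for `system:serviceaccount:trident:trident-controller` (per the variable defaults) and is surfaced through the new `netapp_trident_operator` output.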