"
- value = module.storage.efs_volume_handle
-}
-
-output "efs_access_point_id" {
- description = "EFS access_point id"
- value = module.storage.efs_access_point_id
-}
-
-output "efs_file_system_id" {
- description = "EFS filesystem id"
- value = module.storage.efs_file_system_id
-}
-
-output "region" {
- description = "Deployment region."
- value = var.region
+output "efs_access_point" {
+ description = "EFS access point"
+ value = module.storage.efs_access_point
}
-output "deploy_id" {
- description = "Deployment ID."
- value = var.deploy_id
+output "efs_file_system" {
+ description = "EFS file system"
+ value = module.storage.efs_file_system
}
output "s3_buckets" {
- description = "S3 buckets name,arn."
+ description = "S3 buckets"
value = module.storage.s3_buckets
}
-output "key_pair_name" {
- description = "SSH key pair name."
- value = aws_key_pair.domino.key_name
+output "domino_key_pair" {
+ description = "Domino key pair"
+ value = aws_key_pair.domino
+}
+
+output "kubeconfig" {
+ value = local.kubeconfig_path
}
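
Note: the root module now returns whole resource objects rather than individual IDs, so downstream configurations select the fields they need. A minimal sketch, assuming the module is instantiated as `module "domino_eks"` (the name is illustrative); it reconstructs the removed efs_volume_handle value from the new object outputs:

    # Illustrative only: "domino_eks" is an assumed module name.
    output "efs_volume_handle" {
      description = "Same <file_system_id>::<access_point_id> string as the removed output."
      value       = "${module.domino_eks.efs_file_system.id}::${module.domino_eks.efs_access_point.id}"
    }
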
diff --git a/submodules/bastion/README.md b/submodules/bastion/README.md
index d5888279..592d13f2 100644
--- a/submodules/bastion/README.md
+++ b/submodules/bastion/README.md
@@ -12,7 +12,7 @@
| Name | Version |
|------|---------|
-| [aws](#provider\_aws) | 4.22.0 |
+| [aws](#provider\_aws) | >= 4.0 |
## Modules
diff --git a/submodules/eks/README.md b/submodules/eks/README.md
index 73c1a1ee..aabdfe5f 100644
--- a/submodules/eks/README.md
+++ b/submodules/eks/README.md
@@ -13,8 +13,8 @@
| Name | Version |
|------|---------|
-| [aws](#provider\_aws) | 4.22.0 |
-| [null](#provider\_null) | 3.1.1 |
+| [aws](#provider\_aws) | >= 4.0 |
+| [null](#provider\_null) | >= 3.1.0 |
## Modules
@@ -34,14 +34,11 @@ No modules.
| [aws_iam_policy.autoscaler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
| [aws_iam_policy.domino_ecr_restricted](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
| [aws_iam_policy.ebs_csi](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
-| [aws_iam_policy.route53](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
-| [aws_iam_policy.s3](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
| [aws_iam_policy.snapshot](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
| [aws_iam_role.eks_cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
| [aws_iam_role.eks_nodes](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
| [aws_iam_role_policy_attachment.aws_eks_nodes](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
| [aws_iam_role_policy_attachment.custom_eks_nodes](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
-| [aws_iam_role_policy_attachment.custom_eks_nodes_route53](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
| [aws_iam_role_policy_attachment.eks_cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
| [aws_kms_key.eks_cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
| [aws_launch_template.additional_node_groups](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
@@ -63,11 +60,8 @@ No modules.
| [aws_iam_policy_document.eks_cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_iam_policy_document.eks_nodes](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_iam_policy_document.kms_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
-| [aws_iam_policy_document.route53](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
-| [aws_iam_policy_document.s3](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_iam_policy_document.snapshot](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
-| [aws_route53_zone.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/route53_zone) | data source |
## Inputs
@@ -84,8 +78,6 @@ No modules.
| [kubeconfig\_path](#input\_kubeconfig\_path) | Kubeconfig file path. | `string` | `"kubeconfig"` | no |
| [private\_subnets](#input\_private\_subnets) | Private subnets object | list(object({
cidr_block = string
name = string
type = string
zone = string
zone_id = string
id = string
}))
| n/a | yes |
| [region](#input\_region) | AWS region for the deployment | `string` | n/a | yes |
-| [route53\_hosted\_zone\_name](#input\_route53\_hosted\_zone\_name) | Route53 zone | `string` | n/a | yes |
-| [s3\_buckets](#input\_s3\_buckets) | S3 buckets information that the nodegroups need access to | list(object({
bucket_name = string
arn = string
}))
| n/a | yes |
| [ssh\_pvt\_key\_path](#input\_ssh\_pvt\_key\_path) | SSH private key filepath. | `string` | n/a | yes |
| [vpc\_id](#input\_vpc\_id) | VPC ID. | `string` | n/a | yes |
@@ -94,9 +86,8 @@ No modules.
| Name | Description |
|------|-------------|
| [cluster\_endpoint](#output\_cluster\_endpoint) | EKS cluster API endpoint. |
-| [eks\_master\_role\_name](#output\_eks\_master\_role\_name) | EKS master role arns. |
-| [hosted\_zone\_id](#output\_hosted\_zone\_id) | DNS hosted zone ID. |
-| [managed\_nodes\_role\_arns](#output\_managed\_nodes\_role\_arns) | EKS managed nodes arns. |
+| [eks\_master\_roles](#output\_eks\_master\_roles) | EKS master roles. |
+| [eks\_node\_roles](#output\_eks\_node\_roles) | EKS managed node roles. |
| [nodes\_security\_group\_id](#output\_nodes\_security\_group\_id) | EKS managed nodes security group id. |
| [security\_group\_id](#output\_security\_group\_id) | EKS security group id. |
diff --git a/submodules/eks/cluster.tf b/submodules/eks/cluster.tf
index b1c5ee70..26a8da95 100755
--- a/submodules/eks/cluster.tf
+++ b/submodules/eks/cluster.tf
@@ -116,10 +116,7 @@ resource "aws_eks_addon" "this" {
resource "null_resource" "kubeconfig" {
provisioner "local-exec" {
- environment = {
- KUBECONFIG = var.kubeconfig_path
- }
- command = "aws eks update-kubeconfig --region ${var.region} --name ${aws_eks_cluster.this.name}"
+ command = "aws eks update-kubeconfig --kubeconfig ${var.kubeconfig_path} --region ${var.region} --name ${aws_eks_cluster.this.name}"
}
triggers = {
domino_eks_cluster_ca = aws_eks_cluster.this.certificate_authority[0].data
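
Note: passing `--kubeconfig` makes the provisioner write to `var.kubeconfig_path` explicitly, instead of depending on a KUBECONFIG environment variable scoped to the local-exec shell. If the configuration later drives Kubernetes resources, the generated file can be consumed directly; a hedged sketch (this diff does not show a kubernetes provider, so treat it as an assumption):

    provider "kubernetes" {
      # Reads the file written by the "aws eks update-kubeconfig" provisioner.
      config_path = var.kubeconfig_path
    }
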
diff --git a/submodules/eks/iam.tf b/submodules/eks/iam.tf
index 8463a3b9..a0715d0b 100644
--- a/submodules/eks/iam.tf
+++ b/submodules/eks/iam.tf
@@ -47,41 +47,6 @@ resource "aws_iam_policy" "domino_ecr_restricted" {
policy = data.aws_iam_policy_document.domino_ecr_restricted.json
}
-data "aws_iam_policy_document" "s3" {
- statement {
-
- effect = "Allow"
- resources = ["*"]
-
- actions = [
- "s3:ListBucket",
- "s3:GetBucketLocation",
- "s3:ListBucketMultipartUploads",
- ]
- }
-
- statement {
- sid = ""
- effect = "Allow"
-
- resources = [for b in var.s3_buckets : "${b.arn}*"]
-
- actions = [
- "s3:PutObject",
- "s3:GetObject",
- "s3:DeleteObject",
- "s3:ListMultipartUploadParts",
- "s3:AbortMultipartUpload",
- ]
- }
-}
-
-resource "aws_iam_policy" "s3" {
- name = "${var.deploy_id}-S3"
- path = "/"
- policy = data.aws_iam_policy_document.s3.json
-}
-
data "aws_iam_policy_document" "autoscaler" {
statement {
@@ -228,33 +193,6 @@ resource "aws_iam_policy" "ebs_csi" {
policy = data.aws_iam_policy_document.ebs_csi.json
}
-data "aws_iam_policy_document" "route53" {
- statement {
-
- effect = "Allow"
- resources = ["*"]
- actions = ["route53:ListHostedZones"]
- }
-
- statement {
-
- effect = "Allow"
- resources = [local.aws_route53_zone_arn]
-
- actions = [
- "route53:ChangeResourceRecordSets",
- "route53:ListResourceRecordSets",
- ]
- }
-}
-
-resource "aws_iam_policy" "route53" {
- count = var.route53_hosted_zone_name != "" ? 1 : 0
- name = "${var.deploy_id}-Route53"
- path = "/"
- policy = data.aws_iam_policy_document.route53.json
-}
-
data "aws_iam_policy_document" "snapshot" {
statement {
@@ -290,10 +228,8 @@ locals {
eks_custom_node_iam_policies = {
"domino_ecr_restricted" = aws_iam_policy.domino_ecr_restricted.arn,
- "s3" = aws_iam_policy.s3.arn,
"autoscaler" = aws_iam_policy.autoscaler.arn,
"ebs_csi" = aws_iam_policy.ebs_csi.arn,
- "route53" = try(aws_iam_policy.route53[0].arn, ""),
"snapshot" = aws_iam_policy.snapshot.arn
}
}
@@ -305,13 +241,7 @@ resource "aws_iam_role_policy_attachment" "aws_eks_nodes" {
}
resource "aws_iam_role_policy_attachment" "custom_eks_nodes" {
- for_each = { for name, arn in local.eks_custom_node_iam_policies : name => arn if name != "route53" }
+ for_each = { for name, arn in local.eks_custom_node_iam_policies : name => arn }
policy_arn = each.value
role = aws_iam_role.eks_nodes.name
}
-
-resource "aws_iam_role_policy_attachment" "custom_eks_nodes_route53" {
- count = var.route53_hosted_zone_name != "" ? 1 : 0
- policy_arn = local.eks_custom_node_iam_policies["route53"]
- role = aws_iam_role.eks_nodes.name
-}
diff --git a/submodules/eks/node-group.tf b/submodules/eks/node-group.tf
index 23b1e65d..d320ea1b 100644
--- a/submodules/eks/node-group.tf
+++ b/submodules/eks/node-group.tf
@@ -11,18 +11,12 @@ data "aws_iam_policy_document" "eks_nodes" {
}
}
-data "aws_route53_zone" "this" {
- name = var.route53_hosted_zone_name
- private_zone = false
-}
-
resource "aws_iam_role" "eks_nodes" {
name = "${local.eks_cluster_name}-eks-nodes"
assume_role_policy = data.aws_iam_policy_document.eks_nodes.json
}
locals {
- aws_route53_zone_arn = data.aws_route53_zone.this.arn
gpu_bootstrap_extra_args = ""
gpu_user_data = base64encode(templatefile("${path.module}/templates/linux_custom.tpl", {
cluster_name = aws_eks_cluster.this.name
diff --git a/submodules/eks/outputs.tf b/submodules/eks/outputs.tf
index 6535c6aa..2803f752 100644
--- a/submodules/eks/outputs.tf
+++ b/submodules/eks/outputs.tf
@@ -14,17 +14,12 @@ output "cluster_endpoint" {
value = aws_eks_cluster.this.endpoint
}
-output "managed_nodes_role_arns" {
- description = "EKS managed nodes arns."
- value = [aws_iam_role.eks_nodes.arn]
+output "eks_node_roles" {
+ description = "EKS managed node roles"
+ value = [aws_iam_role.eks_nodes]
}
-output "eks_master_role_name" {
- description = "EKS master role arns."
- value = [aws_iam_role.eks_cluster.name]
-}
-
-output "hosted_zone_id" {
- description = "DNS hosted zone ID."
- value = data.aws_route53_zone.this.zone_id
+output "eks_master_roles" {
+ description = "EKS master roles."
+ value = [aws_iam_role.eks_cluster]
}
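
Note: because the module now emits full `aws_iam_role` objects, callers derive whatever attribute they need. A sketch of wiring them into the k8s submodule's renamed inputs; the module labels `eks` and `k8s_setup` are assumptions for illustration:

    module "k8s_setup" {
      source               = "./submodules/k8s"
      eks_node_role_arns   = [for r in module.eks.eks_node_roles : r.arn]
      eks_master_role_arns = [for r in module.eks.eks_master_roles : r.arn]
      # (remaining required arguments omitted)
    }
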
diff --git a/submodules/eks/variables.tf b/submodules/eks/variables.tf
index f4b161e8..5f0fa430 100755
--- a/submodules/eks/variables.tf
+++ b/submodules/eks/variables.tf
@@ -137,11 +137,6 @@ variable "ssh_pvt_key_path" {
description = "SSH private key filepath."
}
-variable "route53_hosted_zone_name" {
- type = string
- description = "Route53 zone"
-}
-
variable "bastion_security_group_id" {
type = string
description = "Bastion security group id."
@@ -172,12 +167,3 @@ variable "create_bastion_sg" {
description = "Create bastion access rules toggle."
type = bool
}
-
-variable "s3_buckets" {
- description = "S3 buckets information that the nodegroups need access to"
- type = list(object({
- bucket_name = string
- arn = string
- }))
-
-}
diff --git a/submodules/k8s/README.md b/submodules/k8s/README.md
index 266f4285..2e142254 100644
--- a/submodules/k8s/README.md
+++ b/submodules/k8s/README.md
@@ -14,9 +14,8 @@
| Name | Version |
|------|---------|
-| [aws](#provider\_aws) | 4.22.0 |
-| [local](#provider\_local) | 2.2.3 |
-| [null](#provider\_null) | 3.1.1 |
+| [local](#provider\_local) | >= 2.2.0 |
+| [null](#provider\_null) | >= 3.1.0 |
## Modules
@@ -28,7 +27,6 @@ No modules.
|------|------|
| [local_file.templates](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
| [null_resource.run_k8s_pre_setup](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
-| [aws_iam_role.eks_master_roles](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_role) | data source |
## Inputs
@@ -37,12 +35,12 @@ No modules.
| [bastion\_public\_ip](#input\_bastion\_public\_ip) | Bastion host public ip. | `string` | n/a | yes |
| [bastion\_user](#input\_bastion\_user) | ec2 instance user. | `string` | `"ec2-user"` | no |
| [calico\_version](#input\_calico\_version) | Calico operator version. | `string` | `"v1.11.0"` | no |
-| [eks\_master\_role\_names](#input\_eks\_master\_role\_names) | IAM role names to be added as masters in eks. | `list(string)` | `[]` | no |
+| [eks\_master\_role\_arns](#input\_eks\_master\_role\_arns) | IAM role ARNs to be added as masters in EKS. | `list(string)` | `[]` | no |
+| [eks\_node\_role\_arns](#input\_eks\_node\_role\_arns) | IAM role ARNs for EKS nodes, added to aws-auth for API auth. | `list(string)` | n/a | yes |
| [k8s\_cluster\_endpoint](#input\_k8s\_cluster\_endpoint) | EKS cluster API endpoint. | `string` | n/a | yes |
| [kubeconfig\_path](#input\_kubeconfig\_path) | Kubeconfig filename. | `string` | `"kubeconfig"` | no |
| [mallory\_local\_normal\_port](#input\_mallory\_local\_normal\_port) | Mallory k8s tunnel normal port. | `string` | `"1315"` | no |
| [mallory\_local\_smart\_port](#input\_mallory\_local\_smart\_port) | Mallory k8s tunnel smart(filters based on the blocked list) port. | `string` | `"1316"` | no |
-| [managed\_nodes\_role\_arns](#input\_managed\_nodes\_role\_arns) | EKS managed nodes arns to be added to aws-auth for api auth. | `list(string)` | n/a | yes |
| [ssh\_pvt\_key\_path](#input\_ssh\_pvt\_key\_path) | SSH private key filepath. | `string` | n/a | yes |
## Outputs
diff --git a/submodules/k8s/main.tf b/submodules/k8s/main.tf
index f9c82843..07b3e7b7 100644
--- a/submodules/k8s/main.tf
+++ b/submodules/k8s/main.tf
@@ -1,8 +1,3 @@
-data "aws_iam_role" "eks_master_roles" {
- for_each = toset(var.eks_master_role_names)
- name = each.key
-}
-
locals {
mallory_config_filename = "mallory.json"
mallory_container_name = "mallory_k8s_tunnel"
@@ -61,14 +56,12 @@ locals {
filename = local.aws_auth_filename
content = templatefile("${local.templates_dir}/${local.aws_auth_template}",
{
- eks_managed_nodes_role_arns = sort(var.managed_nodes_role_arns)
- eks_master_role_arns = try({ for r in sort(var.eks_master_role_names) : r => data.aws_iam_role.eks_master_roles[r].arn }, {})
-
+ eks_node_role_arns = toset(var.eks_node_role_arns)
+ eks_master_role_arns = toset(var.eks_master_role_arns)
})
}
}
-
}
resource "local_file" "templates" {
diff --git a/submodules/k8s/templates/aws-auth.yaml.tftpl b/submodules/k8s/templates/aws-auth.yaml.tftpl
index 51cc304c..b456d99b 100644
--- a/submodules/k8s/templates/aws-auth.yaml.tftpl
+++ b/submodules/k8s/templates/aws-auth.yaml.tftpl
@@ -5,7 +5,7 @@ metadata:
namespace: kube-system
data:
mapRoles: |
-%{ for arn in eks_managed_nodes_role_arns ~}
+%{ for arn in eks_node_role_arns ~}
- rolearn: ${arn}
username: system:node:{{EC2PrivateDNSName}}
groups:
diff --git a/submodules/k8s/templates/k8s-pre-setup.sh.tftpl.back b/submodules/k8s/templates/k8s-pre-setup.sh.tftpl.back
deleted file mode 100644
index 64898311..00000000
--- a/submodules/k8s/templates/k8s-pre-setup.sh.tftpl.back
+++ /dev/null
@@ -1,172 +0,0 @@
-#! /usr/bin/env bash
-
-RED="\e[31m"
-GREEN="\e[32m"
-EC="\e[0m"
-
-open_ssh_tunnel_to_k8s_api() {
- printf "$GREEN Setting up mallory k8s tunnel... $EC \n"
- if [[ -z "$https_proxy" ]]; then
- MALLORY_PORT=${mallory_port}
- https_proxy=http://localhost:$MALLORY_PORT
- export https_proxy
- fi
- eval ${k8s_tunnel_command} && sleep 3
- echo
-}
-
-check_kubeconfig() {
- KUBECONFIG="${kubeconfig_path}"
- printf "$GREEN Checking if $KUBECONFIG exists... $EC \n"
- if test -f "$KUBECONFIG"; then
- echo "$KUBECONFIG exists." && export KUBECONFIG
- else
- echo "$KUBECONFIG does not exist." && exit 1
- fi
- echo
-}
-
-set_k8s_auth() {\
- AWS_AUTH_YAML="${aws_auth_yaml}"
- if test -f "$AWS_AUTH_YAML"; then
- printf "$GREEN Updating $AWS_AUTH_YAML... $EC \n"
- kubectl apply -f "$AWS_AUTH_YAML"
- else
- printf "$RED $AWS_AUTH_YAML does not exist. $EC \n" && exit 1
- fi
- echo
-}
-
-install_calico() {
- CALICO_OPERATOR_YAML_URL=${calico_operator_url}
- printf "$GREEN Installing Calico Operator $EC \n"
- kubectl apply -f $CALICO_OPERATOR_YAML_URL || printf "$RED There was an error installing the calico operator"
- echo
- CALICO_CRD_YAML_URL=${calico_custom_resources_url}
- printf "$GREEN Installing Calico Custom resources $EC \n" || printf "$RED There was an error installing the calico CRD"
- kubectl apply -f $CALICO_CRD_YAML_URL
- echo
-}
-
-
-create_namespace() {
- namespace=$1
- printf "$GREEN Creating namespace $namespace $EC \n"
- kubectl create namespace $namespace --dry-run=client --save-config -o yaml | kubectl apply -f -
- echo
-}
-
-helm_install() {
- HELM_REPO=${domino_helm_repo}
- printf "$RED Make sure you are authenticated with $HELM_REPO $EC \n"
- chart_name=$1
- chart_version=$2
- namespace=$3
-
- printf "$GREEN Installing helm-chart:$chart_name version:$chart_version $EC \n"
-
- helm_command="helm upgrade --install $chart_name $HELM_REPO/$chart_name --version $chart_version --namespace $namespace"
- echo "$helm_command"
- eval "$helm_command"
-}
-
-kubectl_apply() {
- k8s_manifest="$1"
- if test -f "$k8s_manifest"; then
- echo "Applying $k8s_manifest..."
- kubectl apply -f $k8s_manifest
- else
- printf "$RED $k8s_manifest does not exist. $EC \n" && exit 1
- fi
-
- kubectl apply -f "$k8s_manifest"
-}
-
-create_namespaces() {
- %{~ for namespace in domino_namespaces ~}
- create_namespace ${namespace}
- %{~ endfor }
- echo
-}
-
-install_helm_charts() {
- %{~ for chart in helm_charts ~}
- helm_install ${chart.name} ${chart.version} ${domino_namespaces_map[chart.namespace]}
- %{~ endfor }
- echo
-}
-
-# create_storage_classes() {
-# echo "Creating storageclasses..."
-
-# %{~ for manifest in domino_storage_classes_filenames ~}
-# kubectl_apply ${manifest}
-# %{~ endfor }
-# echo
-# }
-
-create_persistent_volumes() {
- printf "$GREEN Creating persistent-volumes... $EC \n"
-
- %{~ for manifest in domino_persistent_volume_filenames ~}
- kubectl_apply ${manifest}
- %{~ endfor }
- echo
-}
-create_persistent_volume_claims() {
- printf "$GREEN Creating persistent-volume-claims... $EC \n"
-
- %{~ for manifest in domino_persistent_volume_claims_filenames ~}
- kubectl_apply ${manifest}
- %{~ endfor }
- echo
-}
-create_persistent_storage() {
- create_persistent_volumes && create_persistent_volume_claims
-}
-
-close_ssh_tunnel_to_k8s_api() {
- printf "$GREEN Shutting down mallory k8s tunnel ${mallory_container_name} ... $EC"
- docker kill "${mallory_container_name}"
- docker rm "${mallory_container_name}" || true
- echo
-}
-
-create_docker_cred_secret() {
- printf "$GREEN Creating domino-quay-repos secret (for domino docker repo pullImageSecret)... $EC"
- kubectl create secret \
- docker-registry \
- -o yaml --dry-run=client --save-config \
- --docker-server=quay.io \
- --docker-username=$QUAY_USERNAME \
- --docker-password=$QUAY_PASSWORD \
- --docker-email=. ${domino_docker_pull_secret_name} | kubectl apply -f -
-
- echo
-}
-
-create_domino_cm() {
- printf "$GREEN Creating configmap for fleetcommand configuration(${domino_config_filename})... $EC"
- kubectl create configmap \
- -o yaml --dry-run=client --save-config \
- fleetcommand-agent-config \
- --from-file=${domino_config_filename} | kubectl apply -f -
- echo
-}
-
-
-
-main() {
- open_ssh_tunnel_to_k8s_api
- check_kubeconfig
- set_k8s_auth
- # create_namespaces
- # create_docker_cred_secret
- # create_persistent_storage
- install_calico
- # create_domino_cm
- # install_helm_charts
-}
-
-trap close_ssh_tunnel_to_k8s_api EXIT
-main
diff --git a/submodules/k8s/variables.tf b/submodules/k8s/variables.tf
index 54adddd9..3ff646c4 100755
--- a/submodules/k8s/variables.tf
+++ b/submodules/k8s/variables.tf
@@ -26,14 +26,14 @@ variable "k8s_cluster_endpoint" {
description = "EKS cluster API endpoint."
}
-variable "managed_nodes_role_arns" {
+variable "eks_node_role_arns" {
type = list(string)
- description = "EKS managed nodes arns to be added to aws-auth for api auth."
+ description = "Roles arns for EKS nodes to be added to aws-auth for api auth."
}
-variable "eks_master_role_names" {
+variable "eks_master_role_arns" {
type = list(string)
- description = "IAM role names to be added as masters in eks."
+ description = "IAM role arns to be added as masters in eks."
default = []
}
diff --git a/submodules/network/README.md b/submodules/network/README.md
index f16b4819..12425e41 100644
--- a/submodules/network/README.md
+++ b/submodules/network/README.md
@@ -12,7 +12,7 @@
| Name | Version |
|------|---------|
-| [aws](#provider\_aws) | 4.22.0 |
+| [aws](#provider\_aws) | >= 4.0 |
## Modules
diff --git a/submodules/storage/README.md b/submodules/storage/README.md
index 7ad6b72b..4cf939e0 100644
--- a/submodules/storage/README.md
+++ b/submodules/storage/README.md
@@ -12,7 +12,7 @@
| Name | Version |
|------|---------|
-| [aws](#provider\_aws) | 4.22.0 |
+| [aws](#provider\_aws) | 4.32.0 |
## Modules
@@ -25,6 +25,8 @@ No modules.
| [aws_efs_access_point.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/efs_access_point) | resource |
| [aws_efs_file_system.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/efs_file_system) | resource |
| [aws_efs_mount_target.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/efs_mount_target) | resource |
+| [aws_iam_policy.s3](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
+| [aws_iam_role_policy_attachment.s3](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
| [aws_s3_bucket.backups](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket) | resource |
| [aws_s3_bucket.blobs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket) | resource |
| [aws_s3_bucket.logs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket) | resource |
@@ -46,6 +48,7 @@ No modules.
| [aws_iam_policy_document.logs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_iam_policy_document.monitoring](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_iam_policy_document.registry](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_iam_policy_document.s3](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
## Inputs
@@ -53,6 +56,7 @@ No modules.
|------|-------------|------|---------|:--------:|
| [deploy\_id](#input\_deploy\_id) | Domino Deployment ID | `string` | n/a | yes |
| [efs\_access\_point\_path](#input\_efs\_access\_point\_path) | Filesystem path for efs. | `string` | `"/domino"` | no |
| [roles](#input\_roles) | List of IAM roles to grant S3 permissions. | `list(any)` | n/a | yes |
| [s3\_encryption\_use\_sse\_kms\_key](#input\_s3\_encryption\_use\_sse\_kms\_key) | if true use 'aws:kms' else 'AES256' for the s3 server-side-encryption. | `bool` | `false` | no |
| [s3\_force\_destroy\_on\_deletion](#input\_s3\_force\_destroy\_on\_deletion) | Toggle to allow recursive deletion of all objects in the s3 buckets. If 'false', terraform will NOT be able to delete non-empty buckets | `bool` | `false` | no |
| [subnets](#input\_subnets) | List of subnet ids to create EFS mount targets | list(object({
name = string
id = string
cidr_block = string
}))
| n/a | yes |
@@ -62,9 +66,7 @@ No modules.
| Name | Description |
|------|-------------|
-| [efs\_access\_point\_id](#output\_efs\_access\_point\_id) | EFS access\_point id |
-| [efs\_file\_system\_id](#output\_efs\_file\_system\_id) | EFS filesystem id |
-| [efs\_volume\_handle](#output\_efs\_volume\_handle) | EFS volume handle :: |
-| [monitoring\_s3\_bucket\_arn](#output\_monitoring\_s3\_bucket\_arn) | Monitoring bucket arn |
+| [efs\_access\_point](#output\_efs\_access\_point) | EFS access point |
+| [efs\_file\_system](#output\_efs\_file\_system) | EFS file system |
| [s3\_buckets](#output\_s3\_buckets) | S3 buckets name and arn |
diff --git a/submodules/storage/iam.tf b/submodules/storage/iam.tf
new file mode 100644
index 00000000..150eb03a
--- /dev/null
+++ b/submodules/storage/iam.tf
@@ -0,0 +1,40 @@
+data "aws_iam_policy_document" "s3" {
+ statement {
+
+ effect = "Allow"
+ resources = [for b in local.s3_buckets : b.arn]
+
+ actions = [
+ "s3:ListBucket",
+ "s3:GetBucketLocation",
+ "s3:ListBucketMultipartUploads",
+ ]
+ }
+
+ statement {
+ sid = ""
+ effect = "Allow"
+
+ resources = [for b in local.s3_buckets : "${b.arn}/*"]
+
+ actions = [
+ "s3:PutObject",
+ "s3:GetObject",
+ "s3:DeleteObject",
+ "s3:ListMultipartUploadParts",
+ "s3:AbortMultipartUpload",
+ ]
+ }
+}
+
+resource "aws_iam_policy" "s3" {
+ name = "${var.deploy_id}-S3"
+ path = "/"
+ policy = data.aws_iam_policy_document.s3.json
+}
+
+resource "aws_iam_role_policy_attachment" "s3" {
+ for_each = toset([for r in var.roles : r.name])
+ policy_arn = aws_iam_policy.s3.arn
+ role = each.value
+}
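
Note: with the S3 policy now owned by the storage module, the caller passes in the role objects that need bucket access; the attachment iterates on `r.name`, so any object exposing `name` works. A minimal sketch of the expected root-level wiring (module labels are assumptions):

    module "storage" {
      source = "./submodules/storage"
      roles  = module.eks.eks_node_roles # role objects; iam.tf attaches by r.name
      # (remaining required arguments omitted)
    }
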
diff --git a/submodules/storage/outputs.tf b/submodules/storage/outputs.tf
index 14c029a3..13c7cf8d 100644
--- a/submodules/storage/outputs.tf
+++ b/submodules/storage/outputs.tf
@@ -1,27 +1,17 @@
-output "efs_volume_handle" {
- description = "EFS volume handle ::"
- value = "${aws_efs_access_point.eks.file_system_id}::${aws_efs_access_point.eks.id}"
+output "efs_access_point" {
+ description = "efs access point"
+ value = aws_efs_access_point.eks
}
-output "efs_access_point_id" {
- description = "EFS access_point id"
- value = aws_efs_access_point.eks.id
-}
-
-output "efs_file_system_id" {
- description = "EFS filesystem id"
- value = aws_efs_file_system.eks.id
-}
-
-output "monitoring_s3_bucket_arn" {
- description = "Monitoring bucket arn"
- value = aws_s3_bucket.backups.arn
+output "efs_file_system" {
+ description = "efs file system"
+ value = aws_efs_file_system.eks
}
output "s3_buckets" {
description = "S3 buckets name and arn"
- value = [for b in local.s3_buckets : {
+ value = { for k, b in local.s3_buckets : k => {
"bucket_name" = b.bucket_name,
"arn" = b.arn
- }]
+ } }
}
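
Note: keying `s3_buckets` by name makes lookups order-independent. A hedged usage sketch; the "blobs" key is inferred from the `aws_s3_bucket.blobs` resource and may not match the actual keys of `local.s3_buckets`:

    locals {
      blobs_bucket_arn = module.storage.s3_buckets["blobs"].arn
    }
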
diff --git a/submodules/storage/variables.tf b/submodules/storage/variables.tf
index d35781e8..53225fef 100755
--- a/submodules/storage/variables.tf
+++ b/submodules/storage/variables.tf
@@ -42,3 +42,8 @@ variable "s3_encryption_use_sse_kms_key" {
type = bool
default = false
}
+
+variable "roles" {
+ description = "List of roles to grant s3 permissions"
+ type = list(any)
+}
diff --git a/variables.tf b/variables.tf
index d13ac93f..1b9ea7d4 100755
--- a/variables.tf
+++ b/variables.tf
@@ -48,7 +48,8 @@ variable "availability_zones" {
variable "route53_hosted_zone_name" {
type = string
- description = "AWS Route53 Hosted zone."
+ description = "Optional hosted zone for External DNSone."
+ default = ""
}
variable "tags" {
@@ -220,3 +221,9 @@ variable "s3_force_destroy_on_deletion" {
type = bool
default = false
}
+
+variable "kubeconfig_path" {
+ description = "fully qualified path name to write the kubeconfig file"
+ type = string
+ default = ""
+}