From 770850e440d62e72683a5630ed639518cdf265b8 Mon Sep 17 00:00:00 2001 From: Chris Doherty Date: Wed, 25 Oct 2023 13:01:03 -0500 Subject: [PATCH] Add SANs configurability for API server --- .../anywhere.eks.amazonaws.com_clusters.yaml | 6 + pkg/api/v1alpha1/cluster.go | 20 + pkg/api/v1alpha1/cluster_types.go | 3 + pkg/api/v1alpha1/zz_generated.deepcopy.go | 5 + .../cloudstack/config/template-cp.yaml | 4 + pkg/providers/cloudstack/template.go | 1 + pkg/providers/cloudstack/template_test.go | 28 + ...uster_api_server_cert_san_domain_name.yaml | 69 ++ .../cluster_api_server_cert_san_ip.yaml | 70 ++ ...uster_api_server_cert_san_domain_name.yaml | 406 +++++++++++ ...pected_cluster_api_server_cert_san_ip.yaml | 406 +++++++++++ pkg/providers/docker/config/template-cp.yaml | 3 + pkg/providers/docker/docker.go | 1 + pkg/providers/docker/docker_test.go | 26 + ...uster_api_server_cert_san_domain_name.yaml | 29 + .../cluster_api_server_cert_san_ip.yaml | 29 + ...uster_api_server_cert_san_domain_name.yaml | 280 ++++++++ ...pected_cluster_api_server_cert_san_ip.yaml | 280 ++++++++ .../tinkerbell/config/template-cp.yaml | 6 +- pkg/providers/tinkerbell/template.go | 1 + pkg/providers/tinkerbell/template_test.go | 34 + ...rbell_api_server_cert_san_domain_name.yaml | 62 ++ ...ter_tinkerbell_api_server_cert_san_ip.yaml | 62 ++ ...rbell_api_server_cert_san_domain_name.yaml | 342 ++++++++++ ...ter_tinkerbell_api_server_cert_san_ip.yaml | 342 ++++++++++ pkg/providers/vsphere/config/template-cp.yaml | 4 + pkg/providers/vsphere/template.go | 1 + pkg/providers/vsphere/template_test.go | 95 +-- ...uster_api_server_cert_san_domain_name.yaml | 58 ++ .../cluster_api_server_cert_san_ip.yaml | 58 ++ ...uster_api_server_cert_san_domain_name.yaml | 633 ++++++++++++++++++ ...pected_cluster_api_server_cert_san_ip.yaml | 633 ++++++++++++++++++ pkg/templater/templater.go | 31 +- 33 files changed, 3946 insertions(+), 82 deletions(-) create mode 100644 pkg/providers/cloudstack/testdata/cluster_api_server_cert_san_domain_name.yaml create mode 100644 pkg/providers/cloudstack/testdata/cluster_api_server_cert_san_ip.yaml create mode 100644 pkg/providers/cloudstack/testdata/expected_cluster_api_server_cert_san_domain_name.yaml create mode 100644 pkg/providers/cloudstack/testdata/expected_cluster_api_server_cert_san_ip.yaml create mode 100644 pkg/providers/docker/testdata/cluster_api_server_cert_san_domain_name.yaml create mode 100644 pkg/providers/docker/testdata/cluster_api_server_cert_san_ip.yaml create mode 100644 pkg/providers/docker/testdata/expected_cluster_api_server_cert_san_domain_name.yaml create mode 100644 pkg/providers/docker/testdata/expected_cluster_api_server_cert_san_ip.yaml create mode 100644 pkg/providers/tinkerbell/testdata/cluster_tinkerbell_api_server_cert_san_domain_name.yaml create mode 100644 pkg/providers/tinkerbell/testdata/cluster_tinkerbell_api_server_cert_san_ip.yaml create mode 100644 pkg/providers/tinkerbell/testdata/expected_cluster_tinkerbell_api_server_cert_san_domain_name.yaml create mode 100644 pkg/providers/tinkerbell/testdata/expected_cluster_tinkerbell_api_server_cert_san_ip.yaml create mode 100644 pkg/providers/vsphere/testdata/cluster_api_server_cert_san_domain_name.yaml create mode 100644 pkg/providers/vsphere/testdata/cluster_api_server_cert_san_ip.yaml create mode 100644 pkg/providers/vsphere/testdata/expected_cluster_api_server_cert_san_domain_name.yaml create mode 100644 pkg/providers/vsphere/testdata/expected_cluster_api_server_cert_san_ip.yaml diff --git 
a/config/crd/bases/anywhere.eks.amazonaws.com_clusters.yaml b/config/crd/bases/anywhere.eks.amazonaws.com_clusters.yaml index 92b773ab2c1f3..001e55b87de93 100644 --- a/config/crd/bases/anywhere.eks.amazonaws.com_clusters.yaml +++ b/config/crd/bases/anywhere.eks.amazonaws.com_clusters.yaml @@ -124,6 +124,12 @@ spec: type: object controlPlaneConfiguration: properties: + certSans: + description: CertSANs is a slice of domain names or IPs to be + added as Subject Name Alternatives of the Kube API Servers Certificate. + items: + type: string + type: array count: description: Count defines the number of desired control plane nodes. Defaults to 1. diff --git a/pkg/api/v1alpha1/cluster.go b/pkg/api/v1alpha1/cluster.go index 6fcc04fdf2c3a..47c4e2bf4d9c7 100644 --- a/pkg/api/v1alpha1/cluster.go +++ b/pkg/api/v1alpha1/cluster.go @@ -17,6 +17,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/validation" utilerrors "k8s.io/apimachinery/pkg/util/errors" + k8sclientvalidation "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" yamlutil "k8s.io/apimachinery/pkg/util/yaml" "sigs.k8s.io/yaml" @@ -188,6 +189,7 @@ var clusterConfigValidations = []func(*Cluster) error{ validateControlPlaneLabels, validatePackageControllerConfiguration, validateEksaVersion, + validateControlPlaneCertSANs, } // GetClusterConfig parses a Cluster object from a multiobject yaml file in disk @@ -419,6 +421,24 @@ func validateControlPlaneEndpoint(clusterConfig *Cluster) error { return nil } +func validateControlPlaneCertSANs(cfg *Cluster) error { + var invalid []string + for _, san := range cfg.Spec.ControlPlaneConfiguration.CertSANs { + isDomain := k8sclientvalidation.IsDNS1123Subdomain(san) + isIP := net.ParseIP(san) + + if len(isDomain) > 0 && isIP == nil { + invalid = append(invalid, san) + } + } + + if len(invalid) > 0 { + return fmt.Errorf("invalid ControlPlaneConfiguration.CertSANs; must be an IP or domain name: [%v]", strings.Join(invalid, ", ")) + } + + return nil +} + func validateWorkerNodeGroups(clusterConfig *Cluster) error { workerNodeGroupConfigs := clusterConfig.Spec.WorkerNodeGroupConfigurations if len(workerNodeGroupConfigs) <= 0 { diff --git a/pkg/api/v1alpha1/cluster_types.go b/pkg/api/v1alpha1/cluster_types.go index 5dfc62be1222b..3c252eae372af 100644 --- a/pkg/api/v1alpha1/cluster_types.go +++ b/pkg/api/v1alpha1/cluster_types.go @@ -282,6 +282,9 @@ type ControlPlaneConfiguration struct { // SkipLoadBalancerDeployment skip deploying control plane load balancer. // Make sure your infrastructure can handle control plane load balancing when you set this field to true. SkipLoadBalancerDeployment bool `json:"skipLoadBalancerDeployment,omitempty"` + // CertSANs is a slice of domain names or IPs to be added as Subject Name Alternatives of the + // Kube API Servers Certificate. + CertSANs []string `json:"certSans,omitempty"` } // MachineHealthCheck allows to configure timeouts for machine health checks. Machine Health Checks are responsible for remediating unhealthy Machines. 
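Note on the validation rule added in pkg/api/v1alpha1/cluster.go above: a SAN entry is accepted when it parses as an IP address or passes the DNS-1123 subdomain check, and it is reported as invalid only when it fails both. A minimal standalone sketch of that rule (illustrative only, not part of the patch; checkSAN and the sample inputs are hypothetical, but the apimachinery helper is the same one the patch imports):

package main

import (
	"fmt"
	"net"

	"k8s.io/apimachinery/pkg/util/validation"
)

// checkSAN mirrors validateControlPlaneCertSANs: a SAN is valid if it is a
// parseable IP address or a DNS-1123 subdomain; otherwise it is flagged.
func checkSAN(san string) bool {
	if net.ParseIP(san) != nil {
		return true
	}
	return len(validation.IsDNS1123Subdomain(san)) == 0
}

func main() {
	// "foo.bar" and "11.11.11.11" match the testdata below; "not_valid!" fails both checks.
	for _, san := range []string{"foo.bar", "11.11.11.11", "not_valid!"} {
		fmt.Printf("%q accepted: %t\n", san, checkSAN(san))
	}
}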
diff --git a/pkg/api/v1alpha1/zz_generated.deepcopy.go b/pkg/api/v1alpha1/zz_generated.deepcopy.go index 66ed81d60bd32..9cbce5238b067 100644 --- a/pkg/api/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/api/v1alpha1/zz_generated.deepcopy.go @@ -867,6 +867,11 @@ func (in *ControlPlaneConfiguration) DeepCopyInto(out *ControlPlaneConfiguration *out = new(ControlPlaneUpgradeRolloutStrategy) **out = **in } + if in.CertSANs != nil { + in, out := &in.CertSANs, &out.CertSANs + *out = make([]string, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneConfiguration. diff --git a/pkg/providers/cloudstack/config/template-cp.yaml b/pkg/providers/cloudstack/config/template-cp.yaml index a55be33d4c07b..d9c2e6e827f1a 100644 --- a/pkg/providers/cloudstack/config/template-cp.yaml +++ b/pkg/providers/cloudstack/config/template-cp.yaml @@ -87,6 +87,10 @@ spec: imageRepository: {{.corednsRepository}} imageTag: {{.corednsVersion}} apiServer: + {{- with .apiServerCertSANs }} + certSANs: + {{- toYaml . | nindent 8 }} + {{- end }} extraArgs: cloud-provider: external audit-policy-file: /etc/kubernetes/audit-policy.yaml diff --git a/pkg/providers/cloudstack/template.go b/pkg/providers/cloudstack/template.go index 78945e855ce82..68b9fad224040 100644 --- a/pkg/providers/cloudstack/template.go +++ b/pkg/providers/cloudstack/template.go @@ -143,6 +143,7 @@ func buildTemplateMapCP(clusterSpec *cluster.Spec) (map[string]interface{}, erro "controlPlaneEndpointHost": host, "controlPlaneEndpointPort": port, "controlPlaneReplicas": clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Count, + "apiServerCertSANs": clusterSpec.Cluster.Spec.ControlPlaneConfiguration.CertSANs, "kubernetesRepository": versionsBundle.KubeDistro.Kubernetes.Repository, "kubernetesVersion": versionsBundle.KubeDistro.Kubernetes.Tag, "etcdRepository": versionsBundle.KubeDistro.Etcd.Repository, diff --git a/pkg/providers/cloudstack/template_test.go b/pkg/providers/cloudstack/template_test.go index 3572a2d154d4d..76c4043d37157 100644 --- a/pkg/providers/cloudstack/template_test.go +++ b/pkg/providers/cloudstack/template_test.go @@ -141,3 +141,31 @@ func TestTemplateBuilderGenerateCAPISpecWorkersInvalidEndpoint(t *testing.T) { _, err := templateBuilder.GenerateCAPISpecWorkers(clusterSpec, machineTemplateNames, kubeadmConfigTemplateNames) g.Expect(err).To(MatchError(ContainSubstring("building template map for MD host 1.1.1.1:: is invalid: address 1.1.1.1::: too many colons in address"))) } + +func TestTemplateBuilder_CertSANs(t *testing.T) { + for _, tc := range []struct { + Input string + Output string + }{ + { + Input: "testdata/cluster_api_server_cert_san_domain_name.yaml", + Output: "testdata/expected_cluster_api_server_cert_san_domain_name.yaml", + }, + { + Input: "testdata/cluster_api_server_cert_san_ip.yaml", + Output: "testdata/expected_cluster_api_server_cert_san_ip.yaml", + }, + } { + g := NewWithT(t) + clusterSpec := test.NewFullClusterSpec(t, tc.Input) + + bldr := cloudstack.NewTemplateBuilder(time.Now) + + data, err := bldr.GenerateCAPISpecControlPlane(clusterSpec, func(values map[string]interface{}) { + values["controlPlaneTemplateName"] = clusterapi.ControlPlaneMachineTemplateName(clusterSpec.Cluster) + }) + g.Expect(err).ToNot(HaveOccurred()) + + test.AssertContentToFile(t, string(data), tc.Output) + } +} diff --git a/pkg/providers/cloudstack/testdata/cluster_api_server_cert_san_domain_name.yaml 
b/pkg/providers/cloudstack/testdata/cluster_api_server_cert_san_domain_name.yaml new file mode 100644 index 0000000000000..2f89aad24aace --- /dev/null +++ b/pkg/providers/cloudstack/testdata/cluster_api_server_cert_san_domain_name.yaml @@ -0,0 +1,69 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster +metadata: + name: test + namespace: test +spec: + clusterNetwork: + cniConfig: + cilium: {} + pods: + cidrBlocks: + - 192.168.0.0/16 + services: + cidrBlocks: + - 10.96.0.0/12 + controlPlaneConfiguration: + count: 1 + endpoint: + host: 0.0.0.0 + certSANs: ["foo.bar"] + machineGroupRef: + kind: CloudStackMachineConfig + name: test + datacenterRef: + kind: CloudStackDatacenterConfig + name: test + kubernetesVersion: "1.21" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: CloudStackDatacenterConfig +metadata: + name: test + namespace: test +spec: + availabilityZones: + - account: "admin" + domain: "domain1" + name: "default-az-0" + credentialsRef: "global" + zone: + name: "zone1" + network: + name: "net1" + managementApiEndpoint: "http://127.16.0.1:8080/client/api" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: CloudStackMachineConfig +metadata: + name: test + namespace: test +spec: + computeOffering: + name: "m4-large" + users: + - name: "mySshUsername" + sshAuthorizedKeys: # The key below was manually generated and not used in any production systems + - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com" + template: + name: "kubernetes_1_21" + diskOffering: + name: "Small" + mountPath: "/data-small" + device: "/dev/vdb" + filesystem: "ext4" + label: "data_disk" + symlinks: + /var/log/kubernetes: /data-small/var/log/kubernetes + affinityGroupIds: + - control-plane-anti-affinity \ No newline at end of file diff --git a/pkg/providers/cloudstack/testdata/cluster_api_server_cert_san_ip.yaml b/pkg/providers/cloudstack/testdata/cluster_api_server_cert_san_ip.yaml new file mode 100644 index 0000000000000..1e6c951d30a36 --- /dev/null +++ b/pkg/providers/cloudstack/testdata/cluster_api_server_cert_san_ip.yaml @@ -0,0 +1,70 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster +metadata: + name: test + namespace: test +spec: + clusterNetwork: + cniConfig: + cilium: {} + pods: + cidrBlocks: + - 192.168.0.0/16 + services: + cidrBlocks: + - 10.96.0.0/12 + controlPlaneConfiguration: + count: 1 + endpoint: + host: 0.0.0.0 + certSANs: ["11.11.11.11"] + machineGroupRef: + kind: CloudStackMachineConfig + name: test + datacenterRef: + kind: CloudStackDatacenterConfig + name: test + kubernetesVersion: "1.21" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: CloudStackDatacenterConfig +metadata: + name: test + namespace: test +spec: + availabilityZones: + - account: "admin" + domain: "domain1" + name: "default-az-0" + credentialsRef: "global" + zone: + name: "zone1" + 
network: + name: "net1" + managementApiEndpoint: "http://127.16.0.1:8080/client/api" + +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: CloudStackMachineConfig +metadata: + name: test + namespace: test +spec: + computeOffering: + name: "m4-large" + users: + - name: "mySshUsername" + sshAuthorizedKeys: # The key below was manually generated and not used in any production systems + - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com" + template: + name: "kubernetes_1_21" + diskOffering: + name: "Small" + mountPath: "/data-small" + device: "/dev/vdb" + filesystem: "ext4" + label: "data_disk" + symlinks: + /var/log/kubernetes: /data-small/var/log/kubernetes + affinityGroupIds: + - control-plane-anti-affinity \ No newline at end of file diff --git a/pkg/providers/cloudstack/testdata/expected_cluster_api_server_cert_san_domain_name.yaml b/pkg/providers/cloudstack/testdata/expected_cluster_api_server_cert_san_domain_name.yaml new file mode 100644 index 0000000000000..77cd30fe30f56 --- /dev/null +++ b/pkg/providers/cloudstack/testdata/expected_cluster_api_server_cert_san_domain_name.yaml @@ -0,0 +1,406 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test + namespace: eksa-system +spec: + clusterNetwork: + pods: + cidrBlocks: [192.168.0.0/16] + services: + cidrBlocks: [10.96.0.0/12] + controlPlaneEndpoint: + host: 0.0.0.0 + port: 6443 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: test + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta3 + kind: CloudStackCluster + name: test +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta3 +kind: CloudStackCluster +metadata: + name: test + namespace: eksa-system +spec: + controlPlaneEndpoint: + host: 0.0.0.0 + port: 6443 + failureDomains: + - name: default-az-0 + zone: + id: + name: zone1 + network: + id: + name: net1 + domain: domain1 + account: admin + acsEndpoint: + name: global + namespace: eksa-system +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: test + namespace: eksa-system +spec: + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta3 + kind: CloudStackMachineTemplate + name: test-control-plane-1 + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: public.ecr.aws/eks-distro/kubernetes + etcd: + local: + imageRepository: public.ecr.aws/eks-distro/etcd-io + imageTag: v3.4.16-eks-1-21-4 + extraArgs: + cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + dns: + imageRepository: public.ecr.aws/eks-distro/coredns + imageTag: v1.8.3-eks-1-21-4 + apiServer: + certSANs: + - foo.bar + extraArgs: + cloud-provider: external + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-path: 
/var/log/kubernetes/api-audit.log + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "512" + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + extraVolumes: + - hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + name: audit-policy + pathType: File + readOnly: true + - hostPath: /var/log/kubernetes + mountPath: /var/log/kubernetes + name: audit-log-dir + pathType: DirectoryOrCreate + readOnly: false + controllerManager: + extraArgs: + cloud-provider: external + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + scheduler: + extraArgs: + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - args: + - manager + env: + - name: vip_arp + value: "true" + - name: port + value: "6443" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: address + value: 0.0.0.0 + image: public.ecr.aws/l0g8r8j6/kube-vip/kube-vip:v0.3.7-eks-a-v0.0.0-dev-build.158 + imagePullPolicy: IfNotPresent + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + name: kubeconfig + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + - content: | + apiVersion: audit.k8s.io/v1beta1 + kind: Policy + rules: + # Log aws-auth configmap changes + - level: RequestResponse + namespaces: ["kube-system"] + verbs: ["update", "patch", "delete"] + resources: + - group: "" # core + resources: ["configmaps"] + resourceNames: ["aws-auth"] + omitStages: + - "RequestReceived" + # The following requests were manually identified as high-volume and low-risk, + # so drop them. + - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + # Don't log HPA fetching metrics. + - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. 
+ - level: None + resources: + - group: "" # core + resources: ["events"] + # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes + - level: Request + users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + - level: Request + userGroups: ["system:nodes"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + # deletecollection calls can be large, don't log responses for expected namespace deletions + - level: Request + users: ["system:serviceaccount:kube-system:namespace-controller"] + verbs: ["deletecollection"] + omitStages: + - "RequestReceived" + # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. + - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + - level: Request + resources: + - group: "" + resources: ["serviceaccounts/token"] + # Get repsonses can be large; skip them. + - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. 
+ - level: Metadata + omitStages: + - "RequestReceived" + owner: root:root + path: /etc/kubernetes/audit-policy.yaml + initConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + provider-id: cloudstack:///'{{ ds.meta_data.instance_id }}' + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: "{{ ds.meta_data.hostname }}" + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + provider-id: cloudstack:///'{{ ds.meta_data.instance_id }}' + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: "{{ ds.meta_data.hostname }}" + preKubeadmCommands: + - swapoff -a + - hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts + - echo "127.0.0.1 localhost" >>/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >>/etc/hosts + - echo "{{ ds.meta_data.hostname }}" >/etc/hostname + - >- + if [ ! -L /var/log/kubernetes ] ; + then + mv /var/log/kubernetes /var/log/kubernetes-$(tr -dc A-Za-z0-9 < /dev/urandom | head -c 10) ; + mkdir -p /data-small/var/log/kubernetes && ln -s /data-small/var/log/kubernetes /var/log/kubernetes ; + else echo "/var/log/kubernetes already symlnk"; + fi + diskSetup: + filesystems: + - device: /dev/vdb1 + overwrite: false + extraOpts: + - -E + - lazy_itable_init=1,lazy_journal_init=1 + filesystem: ext4 + label: data_disk + partitions: + - device: /dev/vdb + layout: true + overwrite: false + tableType: gpt + mounts: + - - LABEL=data_disk + - /data-small + useExperimentalRetryJoin: true + users: + - name: mySshUsername + sshAuthorizedKeys: + - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ==' + sudo: ALL=(ALL) NOPASSWD:ALL + format: cloud-config + replicas: 1 + version: v1.21.2-eks-1-21-4 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta3 +kind: CloudStackMachineTemplate +metadata: + annotations: + device.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: /dev/vdb + filesystem.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: ext4 + label.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: data_disk + mountpath.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: /data-small + symlinks.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: /var/log/kubernetes:/data-small/var/log/kubernetes + creationTimestamp: null + name: test-control-plane-1 + namespace: eksa-system +spec: + template: + spec: + affinityGroupIDs: + - control-plane-anti-affinity + diskOffering: + customSizeInGB: 0 + device: /dev/vdb + filesystem: ext4 + label: data_disk + mountPath: /data-small + name: Small + offering: + name: m4-large + sshKey: "" + template: + name: kubernetes_1_21 + +--- diff --git 
a/pkg/providers/cloudstack/testdata/expected_cluster_api_server_cert_san_ip.yaml b/pkg/providers/cloudstack/testdata/expected_cluster_api_server_cert_san_ip.yaml new file mode 100644 index 0000000000000..12cf386ce5a63 --- /dev/null +++ b/pkg/providers/cloudstack/testdata/expected_cluster_api_server_cert_san_ip.yaml @@ -0,0 +1,406 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test + namespace: eksa-system +spec: + clusterNetwork: + pods: + cidrBlocks: [192.168.0.0/16] + services: + cidrBlocks: [10.96.0.0/12] + controlPlaneEndpoint: + host: 0.0.0.0 + port: 6443 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: test + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta3 + kind: CloudStackCluster + name: test +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta3 +kind: CloudStackCluster +metadata: + name: test + namespace: eksa-system +spec: + controlPlaneEndpoint: + host: 0.0.0.0 + port: 6443 + failureDomains: + - name: default-az-0 + zone: + id: + name: zone1 + network: + id: + name: net1 + domain: domain1 + account: admin + acsEndpoint: + name: global + namespace: eksa-system +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: test + namespace: eksa-system +spec: + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta3 + kind: CloudStackMachineTemplate + name: test-control-plane-1 + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: public.ecr.aws/eks-distro/kubernetes + etcd: + local: + imageRepository: public.ecr.aws/eks-distro/etcd-io + imageTag: v3.4.16-eks-1-21-4 + extraArgs: + cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + dns: + imageRepository: public.ecr.aws/eks-distro/coredns + imageTag: v1.8.3-eks-1-21-4 + apiServer: + certSANs: + - 11.11.11.11 + extraArgs: + cloud-provider: external + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-path: /var/log/kubernetes/api-audit.log + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "512" + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + extraVolumes: + - hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + name: audit-policy + pathType: File + readOnly: true + - hostPath: /var/log/kubernetes + mountPath: /var/log/kubernetes + name: audit-log-dir + pathType: DirectoryOrCreate + readOnly: false + controllerManager: + extraArgs: + cloud-provider: external + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + scheduler: + extraArgs: + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - args: + - manager + env: + - name: vip_arp + value: "true" + - name: port + value: "6443" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: address + value: 0.0.0.0 + image: public.ecr.aws/l0g8r8j6/kube-vip/kube-vip:v0.3.7-eks-a-v0.0.0-dev-build.158 + imagePullPolicy: IfNotPresent + name: kube-vip + resources: {} 
+ securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + name: kubeconfig + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + - content: | + apiVersion: audit.k8s.io/v1beta1 + kind: Policy + rules: + # Log aws-auth configmap changes + - level: RequestResponse + namespaces: ["kube-system"] + verbs: ["update", "patch", "delete"] + resources: + - group: "" # core + resources: ["configmaps"] + resourceNames: ["aws-auth"] + omitStages: + - "RequestReceived" + # The following requests were manually identified as high-volume and low-risk, + # so drop them. + - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + # Don't log HPA fetching metrics. + - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. + - level: None + resources: + - group: "" # core + resources: ["events"] + # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes + - level: Request + users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + - level: Request + userGroups: ["system:nodes"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + # deletecollection calls can be large, don't log responses for expected namespace deletions + - level: Request + users: ["system:serviceaccount:kube-system:namespace-controller"] + verbs: ["deletecollection"] + omitStages: + - "RequestReceived" + # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. + - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + - level: Request + resources: + - group: "" + resources: ["serviceaccounts/token"] + # Get repsonses can be large; skip them. 
+ - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. + - level: Metadata + omitStages: + - "RequestReceived" + owner: root:root + path: /etc/kubernetes/audit-policy.yaml + initConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + provider-id: cloudstack:///'{{ ds.meta_data.instance_id }}' + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: "{{ ds.meta_data.hostname }}" + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + provider-id: cloudstack:///'{{ ds.meta_data.instance_id }}' + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: "{{ ds.meta_data.hostname }}" + preKubeadmCommands: + - swapoff -a + - hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts + - echo "127.0.0.1 localhost" >>/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >>/etc/hosts + - echo "{{ ds.meta_data.hostname }}" >/etc/hostname + - >- + if [ ! 
-L /var/log/kubernetes ] ; + then + mv /var/log/kubernetes /var/log/kubernetes-$(tr -dc A-Za-z0-9 < /dev/urandom | head -c 10) ; + mkdir -p /data-small/var/log/kubernetes && ln -s /data-small/var/log/kubernetes /var/log/kubernetes ; + else echo "/var/log/kubernetes already symlnk"; + fi + diskSetup: + filesystems: + - device: /dev/vdb1 + overwrite: false + extraOpts: + - -E + - lazy_itable_init=1,lazy_journal_init=1 + filesystem: ext4 + label: data_disk + partitions: + - device: /dev/vdb + layout: true + overwrite: false + tableType: gpt + mounts: + - - LABEL=data_disk + - /data-small + useExperimentalRetryJoin: true + users: + - name: mySshUsername + sshAuthorizedKeys: + - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ==' + sudo: ALL=(ALL) NOPASSWD:ALL + format: cloud-config + replicas: 1 + version: v1.21.2-eks-1-21-4 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta3 +kind: CloudStackMachineTemplate +metadata: + annotations: + device.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: /dev/vdb + filesystem.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: ext4 + label.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: data_disk + mountpath.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: /data-small + symlinks.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: /var/log/kubernetes:/data-small/var/log/kubernetes + creationTimestamp: null + name: test-control-plane-1 + namespace: eksa-system +spec: + template: + spec: + affinityGroupIDs: + - control-plane-anti-affinity + diskOffering: + customSizeInGB: 0 + device: /dev/vdb + filesystem: ext4 + label: data_disk + mountPath: /data-small + name: Small + offering: + name: m4-large + sshKey: "" + template: + name: kubernetes_1_21 + +--- diff --git a/pkg/providers/docker/config/template-cp.yaml b/pkg/providers/docker/config/template-cp.yaml index 30f2d3e7b1ac8..16245a8bc6152 100644 --- a/pkg/providers/docker/config/template-cp.yaml +++ b/pkg/providers/docker/config/template-cp.yaml @@ -89,6 +89,9 @@ spec: certSANs: - localhost - 127.0.0.1 + {{- with .apiServerCertSANs }} + {{- toYaml . 
| nindent 8 }} + {{- end }} extraArgs: audit-policy-file: /etc/kubernetes/audit-policy.yaml audit-log-path: /var/log/kubernetes/api-audit.log diff --git a/pkg/providers/docker/docker.go b/pkg/providers/docker/docker.go index 6a5a3584de5bc..d6f5f6d0ce19d 100644 --- a/pkg/providers/docker/docker.go +++ b/pkg/providers/docker/docker.go @@ -278,6 +278,7 @@ func buildTemplateMapCP(clusterSpec *cluster.Spec) (map[string]interface{}, erro "haproxyImageRepository": getHAProxyImageRepo(versionsBundle.Haproxy.Image), "haproxyImageTag": versionsBundle.Haproxy.Image.Tag(), "workerNodeGroupConfigurations": clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations, + "apiServerCertSANs": clusterSpec.Cluster.Spec.ControlPlaneConfiguration.CertSANs, } if clusterSpec.Cluster.Spec.ExternalEtcdConfiguration != nil { diff --git a/pkg/providers/docker/docker_test.go b/pkg/providers/docker/docker_test.go index 6339d98f70180..c09de9a6c54d3 100644 --- a/pkg/providers/docker/docker_test.go +++ b/pkg/providers/docker/docker_test.go @@ -986,3 +986,29 @@ func TestDockerGenerateDeploymentFileWithMirrorAndAuthConfig(t *testing.T) { test.AssertContentToFile(t, string(cp), "testdata/expected_results_mirror_with_auth_config_cp.yaml") test.AssertContentToFile(t, string(md), "testdata/expected_results_mirror_with_auth_config_md.yaml") } + +func TestTemplateBuilder_CertSANs(t *testing.T) { + for _, tc := range []struct { + Input string + Output string + }{ + { + Input: "testdata/cluster_api_server_cert_san_domain_name.yaml", + Output: "testdata/expected_cluster_api_server_cert_san_domain_name.yaml", + }, + { + Input: "testdata/cluster_api_server_cert_san_ip.yaml", + Output: "testdata/expected_cluster_api_server_cert_san_ip.yaml", + }, + } { + g := NewWithT(t) + clusterSpec := test.NewFullClusterSpec(t, tc.Input) + + bldr := docker.NewDockerTemplateBuilder(time.Now) + + data, err := bldr.GenerateCAPISpecControlPlane(clusterSpec) + g.Expect(err).ToNot(HaveOccurred()) + + test.AssertContentToFile(t, string(data), tc.Output) + } +} diff --git a/pkg/providers/docker/testdata/cluster_api_server_cert_san_domain_name.yaml b/pkg/providers/docker/testdata/cluster_api_server_cert_san_domain_name.yaml new file mode 100644 index 0000000000000..37e4bd3eda17a --- /dev/null +++ b/pkg/providers/docker/testdata/cluster_api_server_cert_san_domain_name.yaml @@ -0,0 +1,29 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster +metadata: + name: test +spec: + clusterNetwork: + cniConfig: + cilium: {} + pods: + cidrBlocks: + - 192.168.0.0/16 + services: + cidrBlocks: + - 10.96.0.0/12 + controlPlaneConfiguration: + count: 1 + certSANs: ["foo.bar"] + datacenterRef: + kind: DockerDatacenterConfig + name: test + kubernetesVersion: "1.21" + managementCluster: + name: test +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: DockerDatacenterConfig +metadata: + name: test +spec: {} \ No newline at end of file diff --git a/pkg/providers/docker/testdata/cluster_api_server_cert_san_ip.yaml b/pkg/providers/docker/testdata/cluster_api_server_cert_san_ip.yaml new file mode 100644 index 0000000000000..471d4596ce0a0 --- /dev/null +++ b/pkg/providers/docker/testdata/cluster_api_server_cert_san_ip.yaml @@ -0,0 +1,29 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster +metadata: + name: test +spec: + clusterNetwork: + cniConfig: + cilium: {} + pods: + cidrBlocks: + - 192.168.0.0/16 + services: + cidrBlocks: + - 10.96.0.0/12 + controlPlaneConfiguration: + count: 1 + certSANs: ["11.11.11.11"] + datacenterRef: + kind: 
DockerDatacenterConfig + name: test + kubernetesVersion: "1.21" + managementCluster: + name: test +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: DockerDatacenterConfig +metadata: + name: test +spec: {} \ No newline at end of file diff --git a/pkg/providers/docker/testdata/expected_cluster_api_server_cert_san_domain_name.yaml b/pkg/providers/docker/testdata/expected_cluster_api_server_cert_san_domain_name.yaml new file mode 100644 index 0000000000000..8e17b0056413d --- /dev/null +++ b/pkg/providers/docker/testdata/expected_cluster_api_server_cert_san_domain_name.yaml @@ -0,0 +1,280 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: test + namespace: eksa-system +spec: + clusterNetwork: + pods: + cidrBlocks: [192.168.0.0/16] + serviceDomain: cluster.local + services: + cidrBlocks: [10.96.0.0/12] + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: test + namespace: eksa-system + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerCluster + name: test + namespace: eksa-system +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerCluster +metadata: + name: test + namespace: eksa-system +spec: + loadBalancer: + imageRepository: + imageTag: +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerMachineTemplate +metadata: + name: + namespace: eksa-system +spec: + template: + spec: + extraMounts: + - containerPath: /var/run/docker.sock + hostPath: /var/run/docker.sock + customImage: public.ecr.aws/l0g8r8j6/kubernetes-sigs/kind/node:v1.21.2-eks-d-1-21-4-eks-a-v0.0.0-dev-build.158 +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: test + namespace: eksa-system +spec: + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachineTemplate + name: + namespace: eksa-system + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: public.ecr.aws/eks-distro/kubernetes + etcd: + local: + imageRepository: public.ecr.aws/eks-distro/etcd-io + imageTag: v3.4.16-eks-1-21-4 + extraArgs: + cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + dns: + imageRepository: public.ecr.aws/eks-distro/coredns + imageTag: v1.8.3-eks-1-21-4 + apiServer: + certSANs: + - localhost + - 127.0.0.1 + - foo.bar + extraArgs: + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-path: /var/log/kubernetes/api-audit.log + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "512" + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + extraVolumes: + - hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + name: audit-policy + pathType: File + readOnly: true + - hostPath: /var/log/kubernetes + mountPath: /var/log/kubernetes + name: audit-log-dir + pathType: DirectoryOrCreate + readOnly: false + controllerManager: + extraArgs: + enable-hostpath-provisioner: "true" + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + scheduler: + extraArgs: + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + files: + - content: | + apiVersion: audit.k8s.io/v1beta1 + kind: Policy + rules: + # Log aws-auth configmap changes + - level: RequestResponse + namespaces: ["kube-system"] + verbs: ["update", "patch", "delete"] + resources: + - group: "" # core + resources: ["configmaps"] + resourceNames: ["aws-auth"] + omitStages: + - 
"RequestReceived" + # The following requests were manually identified as high-volume and low-risk, + # so drop them. + - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + # Don't log HPA fetching metrics. + - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. + - level: None + resources: + - group: "" # core + resources: ["events"] + # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes + - level: Request + users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + - level: Request + userGroups: ["system:nodes"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + # deletecollection calls can be large, don't log responses for expected namespace deletions + - level: Request + users: ["system:serviceaccount:kube-system:namespace-controller"] + verbs: ["deletecollection"] + omitStages: + - "RequestReceived" + # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. + - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + - level: Request + resources: + - group: "" + resources: ["serviceaccounts/token"] + # Get repsonses can be large; skip them. 
+ - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. + - level: Metadata + omitStages: + - "RequestReceived" + owner: root:root + path: /etc/kubernetes/audit-policy.yaml + initConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + cgroup-driver: cgroupfs + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + taints: [] + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + cgroup-driver: cgroupfs + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + taints: [] + replicas: 1 + version: v1.21.2-eks-1-21-4 diff --git a/pkg/providers/docker/testdata/expected_cluster_api_server_cert_san_ip.yaml b/pkg/providers/docker/testdata/expected_cluster_api_server_cert_san_ip.yaml new file mode 100644 index 0000000000000..1efeddc8ca669 --- /dev/null +++ b/pkg/providers/docker/testdata/expected_cluster_api_server_cert_san_ip.yaml @@ -0,0 +1,280 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: test + namespace: eksa-system +spec: + clusterNetwork: + pods: + cidrBlocks: [192.168.0.0/16] + serviceDomain: cluster.local + services: + cidrBlocks: [10.96.0.0/12] + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: test + namespace: eksa-system + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerCluster + name: test + namespace: eksa-system +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerCluster +metadata: + name: test + namespace: eksa-system +spec: + loadBalancer: + imageRepository: + imageTag: +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerMachineTemplate +metadata: + name: + namespace: eksa-system +spec: + template: + spec: + extraMounts: + - containerPath: /var/run/docker.sock + hostPath: /var/run/docker.sock + customImage: public.ecr.aws/l0g8r8j6/kubernetes-sigs/kind/node:v1.21.2-eks-d-1-21-4-eks-a-v0.0.0-dev-build.158 +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: test + namespace: 
eksa-system +spec: + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachineTemplate + name: + namespace: eksa-system + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: public.ecr.aws/eks-distro/kubernetes + etcd: + local: + imageRepository: public.ecr.aws/eks-distro/etcd-io + imageTag: v3.4.16-eks-1-21-4 + extraArgs: + cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + dns: + imageRepository: public.ecr.aws/eks-distro/coredns + imageTag: v1.8.3-eks-1-21-4 + apiServer: + certSANs: + - localhost + - 127.0.0.1 + - 11.11.11.11 + extraArgs: + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-path: /var/log/kubernetes/api-audit.log + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "512" + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + extraVolumes: + - hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + name: audit-policy + pathType: File + readOnly: true + - hostPath: /var/log/kubernetes + mountPath: /var/log/kubernetes + name: audit-log-dir + pathType: DirectoryOrCreate + readOnly: false + controllerManager: + extraArgs: + enable-hostpath-provisioner: "true" + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + scheduler: + extraArgs: + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + files: + - content: | + apiVersion: audit.k8s.io/v1beta1 + kind: Policy + rules: + # Log aws-auth configmap changes + - level: RequestResponse + namespaces: ["kube-system"] + verbs: ["update", "patch", "delete"] + resources: + - group: "" # core + resources: ["configmaps"] + resourceNames: ["aws-auth"] + omitStages: + - "RequestReceived" + # The following requests were manually identified as high-volume and low-risk, + # so drop them. + - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + # Don't log HPA fetching metrics. + - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. 
+ - level: None + resources: + - group: "" # core + resources: ["events"] + # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes + - level: Request + users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + - level: Request + userGroups: ["system:nodes"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + # deletecollection calls can be large, don't log responses for expected namespace deletions + - level: Request + users: ["system:serviceaccount:kube-system:namespace-controller"] + verbs: ["deletecollection"] + omitStages: + - "RequestReceived" + # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. + - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + - level: Request + resources: + - group: "" + resources: ["serviceaccounts/token"] + # Get repsonses can be large; skip them. + - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. 
+ - level: Metadata + omitStages: + - "RequestReceived" + owner: root:root + path: /etc/kubernetes/audit-policy.yaml + initConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + cgroup-driver: cgroupfs + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + taints: [] + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + cgroup-driver: cgroupfs + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + taints: [] + replicas: 1 + version: v1.21.2-eks-1-21-4 diff --git a/pkg/providers/tinkerbell/config/template-cp.yaml b/pkg/providers/tinkerbell/config/template-cp.yaml index ae0ada4da5091..6651c1e48d8e7 100644 --- a/pkg/providers/tinkerbell/config/template-cp.yaml +++ b/pkg/providers/tinkerbell/config/template-cp.yaml @@ -94,6 +94,10 @@ spec: {{- end }} {{- end}} apiServer: + {{- with .apiServerCertSANs }} + certSANs: + {{- toYaml . | nindent 8 }} + {{- end }} extraArgs: audit-policy-file: /etc/kubernetes/audit-policy.yaml audit-log-path: /var/log/kubernetes/api-audit.log @@ -410,7 +414,7 @@ spec: {{- if .upgradeRolloutStrategy }} rolloutStrategy: rollingUpdate: - maxSurge: {{.maxSurge}} + maxSurge: {{.maxSurge}} {{- end }} version: {{.kubernetesVersion}} --- diff --git a/pkg/providers/tinkerbell/template.go b/pkg/providers/tinkerbell/template.go index 4100b092d4ccf..471ac2b162c60 100644 --- a/pkg/providers/tinkerbell/template.go +++ b/pkg/providers/tinkerbell/template.go @@ -403,6 +403,7 @@ func buildTemplateMapCP( "clusterName": clusterSpec.Cluster.Name, "controlPlaneEndpointIp": clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Endpoint.Host, "controlPlaneReplicas": clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Count, + "apiServerCertSANs": clusterSpec.Cluster.Spec.ControlPlaneConfiguration.CertSANs, "controlPlaneSshAuthorizedKey": controlPlaneMachineSpec.Users[0].SshAuthorizedKeys[0], "controlPlaneSshUsername": controlPlaneMachineSpec.Users[0].Name, "eksaSystemNamespace": constants.EksaSystemNamespace, diff --git a/pkg/providers/tinkerbell/template_test.go b/pkg/providers/tinkerbell/template_test.go index 09bc5f22fd992..93864d24e93dc 100644 --- a/pkg/providers/tinkerbell/template_test.go +++ b/pkg/providers/tinkerbell/template_test.go @@ -2,6 +2,7 @@ package tinkerbell import ( "testing" + "time" . 
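Editor's aside (not part of the patch): the hunks above add an `apiServerCertSANs` value to the control-plane template data and render it with a `{{- with .apiServerCertSANs }} ... {{- toYaml . | nindent 8 }}` block. The following is a minimal, self-contained sketch of how that block behaves; `toYaml` and `nindent` are simplified stand-ins for the project's real templater helpers, and the exact indentation is illustrative only.

// Illustrative sketch: renders the apiServer certSANs block the way the
// control-plane templates do when CertSANs are configured.
package main

import (
	"os"
	"strings"
	"text/template"
)

const apiServerSnippet = `      apiServer:
        {{- with .apiServerCertSANs }}
        certSANs:
        {{- toYaml . | nindent 8 }}
        {{- end }}`

func main() {
	funcs := template.FuncMap{
		// Simplified stand-in: renders a []string as a YAML sequence.
		"toYaml": func(v []string) string { return "- " + strings.Join(v, "\n- ") },
		// Simplified stand-in: prefixes a newline and indents every line by n spaces.
		"nindent": func(n int, s string) string {
			pad := "\n" + strings.Repeat(" ", n)
			return pad + strings.ReplaceAll(s, "\n", pad)
		},
	}
	tpl := template.Must(template.New("apiServer").Funcs(funcs).Parse(apiServerSnippet))
	// With CertSANs set, a certSANs list is emitted under apiServer; when the
	// slice is empty or unset, the "with" action omits the whole block.
	_ = tpl.Execute(os.Stdout, map[string]interface{}{
		"apiServerCertSANs": []string{"foo.bar", "11.11.11.11"},
	})
}

Run against the sample data, this prints an apiServer block with "certSANs: [foo.bar, 11.11.11.11]" expanded as a list, which is what the expected fixtures added below assert against.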
"github.com/onsi/gomega" @@ -130,3 +131,36 @@ func TestGenerateTemplateBuilderForMachineConfigOsImageURL(t *testing.T) { g.Expect(err).NotTo(HaveOccurred()) g.Expect(gotEtcdMachineSpec).To(Equal(expectedEtcdMachineSpec)) } + +func TestTemplateBuilder_CertSANs(t *testing.T) { + for _, tc := range []struct { + Input string + Output string + }{ + { + Input: "testdata/cluster_tinkerbell_api_server_cert_san_domain_name.yaml", + Output: "testdata/expected_cluster_tinkerbell_api_server_cert_san_domain_name.yaml", + }, + { + Input: "testdata/cluster_tinkerbell_api_server_cert_san_ip.yaml", + Output: "testdata/expected_cluster_tinkerbell_api_server_cert_san_ip.yaml", + }, + } { + g := NewWithT(t) + clusterSpec := test.NewFullClusterSpec(t, tc.Input) + + cpMachineCfg, err := getControlPlaneMachineSpec(clusterSpec) + g.Expect(err).ToNot(HaveOccurred()) + + wngMachineCfgs, err := getWorkerNodeGroupMachineSpec(clusterSpec) + g.Expect(err).ToNot(HaveOccurred()) + + bldr := NewTemplateBuilder(&clusterSpec.TinkerbellDatacenter.Spec, cpMachineCfg, nil, wngMachineCfgs, "0.0.0.0", time.Now) + + data, err := bldr.GenerateCAPISpecControlPlane(clusterSpec) + g.Expect(err).ToNot(HaveOccurred()) + + test.AssertContentToFile(t, string(data), tc.Output) + + } +} diff --git a/pkg/providers/tinkerbell/testdata/cluster_tinkerbell_api_server_cert_san_domain_name.yaml b/pkg/providers/tinkerbell/testdata/cluster_tinkerbell_api_server_cert_san_domain_name.yaml new file mode 100644 index 0000000000000..cf3e516d52614 --- /dev/null +++ b/pkg/providers/tinkerbell/testdata/cluster_tinkerbell_api_server_cert_san_domain_name.yaml @@ -0,0 +1,62 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster +metadata: + name: test + namespace: test +spec: + clusterNetwork: + cniConfig: + cilium: {} + pods: + cidrBlocks: + - 192.168.0.0/16 + services: + cidrBlocks: + - 10.96.0.0/12 + controlPlaneConfiguration: + count: 1 + endpoint: + host: 0.0.0.0 + certSANS: ["foo.bar"] + machineGroupRef: + name: test + kind: TinkerbellMachineConfig + datacenterRef: + kind: TinkerbellDatacenterConfig + name: test + kubernetesVersion: "1.21" + managementCluster: + name: test +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: TinkerbellDatacenterConfig +metadata: + name: test + namespace: test +spec: + tinkerbellIP: "5.6.7.8" + osImageURL: "https://ubuntu-1-21.gz" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: TinkerbellMachineConfig +metadata: + name: test + namespace: test +spec: + hardwareSelector: + type: "node" + osFamily: ubuntu + templateRef: + kind: TinkerbellTemplateConfig + name: test + users: + - name: tink-user + sshAuthorizedKeys: + - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: TinkerbellTemplateConfig +metadata: + name: test +spec: + template: {} diff --git 
a/pkg/providers/tinkerbell/testdata/cluster_tinkerbell_api_server_cert_san_ip.yaml b/pkg/providers/tinkerbell/testdata/cluster_tinkerbell_api_server_cert_san_ip.yaml new file mode 100644 index 0000000000000..1f7bdd27f5ca2 --- /dev/null +++ b/pkg/providers/tinkerbell/testdata/cluster_tinkerbell_api_server_cert_san_ip.yaml @@ -0,0 +1,62 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster +metadata: + name: test + namespace: test +spec: + clusterNetwork: + cniConfig: + cilium: {} + pods: + cidrBlocks: + - 192.168.0.0/16 + services: + cidrBlocks: + - 10.96.0.0/12 + controlPlaneConfiguration: + count: 1 + endpoint: + host: 0.0.0.0 + certSANS: ["11.11.11.11"] + machineGroupRef: + name: test + kind: TinkerbellMachineConfig + datacenterRef: + kind: TinkerbellDatacenterConfig + name: test + kubernetesVersion: "1.21" + managementCluster: + name: test +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: TinkerbellDatacenterConfig +metadata: + name: test + namespace: test +spec: + tinkerbellIP: "5.6.7.8" + osImageURL: "https://ubuntu-1-21.gz" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: TinkerbellMachineConfig +metadata: + name: test + namespace: test +spec: + hardwareSelector: + type: "node" + osFamily: ubuntu + templateRef: + kind: TinkerbellTemplateConfig + name: test + users: + - name: tink-user + sshAuthorizedKeys: + - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: TinkerbellTemplateConfig +metadata: + name: test +spec: + template: {} diff --git a/pkg/providers/tinkerbell/testdata/expected_cluster_tinkerbell_api_server_cert_san_domain_name.yaml b/pkg/providers/tinkerbell/testdata/expected_cluster_tinkerbell_api_server_cert_san_domain_name.yaml new file mode 100644 index 0000000000000..4cb6e6625d4ba --- /dev/null +++ b/pkg/providers/tinkerbell/testdata/expected_cluster_tinkerbell_api_server_cert_san_domain_name.yaml @@ -0,0 +1,342 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test + namespace: eksa-system +spec: + clusterNetwork: + pods: + cidrBlocks: [192.168.0.0/16] + services: + cidrBlocks: [10.96.0.0/12] + controlPlaneEndpoint: + host: 0.0.0.0 + port: 6443 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: test + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: TinkerbellCluster + name: test +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: test + namespace: eksa-system +spec: + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: public.ecr.aws/eks-distro/kubernetes + etcd: + local: + imageRepository: public.ecr.aws/eks-distro/etcd-io + imageTag: v3.4.16-eks-1-21-4 + dns: + 
imageRepository: public.ecr.aws/eks-distro/coredns + imageTag: v1.8.3-eks-1-21-4 + apiServer: + certSANs: + - foo.bar + extraArgs: + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-path: /var/log/kubernetes/api-audit.log + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "512" + feature-gates: ServiceLoadBalancerClass=true + extraVolumes: + - hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + name: audit-policy + pathType: File + readOnly: true + - hostPath: /var/log/kubernetes + mountPath: /var/log/kubernetes + name: audit-log-dir + pathType: DirectoryOrCreate + readOnly: false + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + provider-id: PROVIDER_ID + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + taints: [] + joinConfiguration: + nodeRegistration: + ignorePreflightErrors: + - DirAvailable--etc-kubernetes-manifests + kubeletExtraArgs: + provider-id: PROVIDER_ID + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + taints: [] + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - args: + - manager + env: + - name: vip_arp + value: "true" + - name: port + value: "6443" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: address + value: 0.0.0.0 + # kube-vip daemon in worker node watches for LoadBalancer services. + # When there is no worker node, make kube-vip in control-plane nodes watch + - name: svc_enable + value: "true" + - name: svc_election + value: "true" + image: public.ecr.aws/l0g8r8j6/kube-vip/kube-vip:v0.3.7-eks-a-v0.0.0-dev-build.581 + imagePullPolicy: IfNotPresent + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + name: kubeconfig + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + - content: | + apiVersion: audit.k8s.io/v1beta1 + kind: Policy + rules: + # Log aws-auth configmap changes + - level: RequestResponse + namespaces: ["kube-system"] + verbs: ["update", "patch", "delete"] + resources: + - group: "" # core + resources: ["configmaps"] + resourceNames: ["aws-auth"] + omitStages: + - "RequestReceived" + # The following requests were manually identified as high-volume and low-risk, + # so drop them. 
+ - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + # Don't log HPA fetching metrics. + - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. + - level: None + resources: + - group: "" # core + resources: ["events"] + # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes + - level: Request + users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + - level: Request + userGroups: ["system:nodes"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + # deletecollection calls can be large, don't log responses for expected namespace deletions + - level: Request + users: ["system:serviceaccount:kube-system:namespace-controller"] + verbs: ["deletecollection"] + omitStages: + - "RequestReceived" + # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. + - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + - level: Request + resources: + - group: "" + resources: ["serviceaccounts/token"] + # Get repsonses can be large; skip them. 
+ - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. + - level: Metadata + omitStages: + - "RequestReceived" + owner: root:root + path: /etc/kubernetes/audit-policy.yaml + users: + - name: tink-user + sshAuthorizedKeys: + - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com' + sudo: ALL=(ALL) NOPASSWD:ALL + format: cloud-config + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: TinkerbellMachineTemplate + name: + replicas: 1 + version: v1.21.2-eks-1-21-4 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: TinkerbellMachineTemplate +metadata: + name: + namespace: eksa-system +spec: + template: + spec: + hardwareAffinity: + required: + - labelSelector: + matchLabels: + type: node + templateOverride: | + global_timeout: 0 + id: "" + name: "" + tasks: null + version: "" + +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: TinkerbellCluster +metadata: + name: test + namespace: eksa-system +spec: + imageLookupFormat: --kube-v1.21.2-eks-1-21-4.raw.gz + imageLookupBaseRegistry: / \ No newline at end of file diff --git a/pkg/providers/tinkerbell/testdata/expected_cluster_tinkerbell_api_server_cert_san_ip.yaml b/pkg/providers/tinkerbell/testdata/expected_cluster_tinkerbell_api_server_cert_san_ip.yaml new file mode 100644 index 0000000000000..b497f8307d39c --- /dev/null +++ b/pkg/providers/tinkerbell/testdata/expected_cluster_tinkerbell_api_server_cert_san_ip.yaml @@ -0,0 +1,342 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test + 
namespace: eksa-system +spec: + clusterNetwork: + pods: + cidrBlocks: [192.168.0.0/16] + services: + cidrBlocks: [10.96.0.0/12] + controlPlaneEndpoint: + host: 0.0.0.0 + port: 6443 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: test + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: TinkerbellCluster + name: test +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: test + namespace: eksa-system +spec: + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: public.ecr.aws/eks-distro/kubernetes + etcd: + local: + imageRepository: public.ecr.aws/eks-distro/etcd-io + imageTag: v3.4.16-eks-1-21-4 + dns: + imageRepository: public.ecr.aws/eks-distro/coredns + imageTag: v1.8.3-eks-1-21-4 + apiServer: + certSANs: + - 11.11.11.11 + extraArgs: + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-path: /var/log/kubernetes/api-audit.log + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "512" + feature-gates: ServiceLoadBalancerClass=true + extraVolumes: + - hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + name: audit-policy + pathType: File + readOnly: true + - hostPath: /var/log/kubernetes + mountPath: /var/log/kubernetes + name: audit-log-dir + pathType: DirectoryOrCreate + readOnly: false + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + provider-id: PROVIDER_ID + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + taints: [] + joinConfiguration: + nodeRegistration: + ignorePreflightErrors: + - DirAvailable--etc-kubernetes-manifests + kubeletExtraArgs: + provider-id: PROVIDER_ID + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + taints: [] + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - args: + - manager + env: + - name: vip_arp + value: "true" + - name: port + value: "6443" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: address + value: 0.0.0.0 + # kube-vip daemon in worker node watches for LoadBalancer services. 
+ # When there is no worker node, make kube-vip in control-plane nodes watch + - name: svc_enable + value: "true" + - name: svc_election + value: "true" + image: public.ecr.aws/l0g8r8j6/kube-vip/kube-vip:v0.3.7-eks-a-v0.0.0-dev-build.581 + imagePullPolicy: IfNotPresent + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + name: kubeconfig + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + - content: | + apiVersion: audit.k8s.io/v1beta1 + kind: Policy + rules: + # Log aws-auth configmap changes + - level: RequestResponse + namespaces: ["kube-system"] + verbs: ["update", "patch", "delete"] + resources: + - group: "" # core + resources: ["configmaps"] + resourceNames: ["aws-auth"] + omitStages: + - "RequestReceived" + # The following requests were manually identified as high-volume and low-risk, + # so drop them. + - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + # Don't log HPA fetching metrics. + - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. + - level: None + resources: + - group: "" # core + resources: ["events"] + # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes + - level: Request + users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + - level: Request + userGroups: ["system:nodes"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + # deletecollection calls can be large, don't log responses for expected namespace deletions + - level: Request + users: ["system:serviceaccount:kube-system:namespace-controller"] + verbs: ["deletecollection"] + omitStages: + - "RequestReceived" + # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. 
+ - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + - level: Request + resources: + - group: "" + resources: ["serviceaccounts/token"] + # Get repsonses can be large; skip them. + - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. + - level: Metadata + omitStages: + - "RequestReceived" + owner: root:root + path: /etc/kubernetes/audit-policy.yaml + users: + - name: tink-user + sshAuthorizedKeys: + - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com' + sudo: ALL=(ALL) NOPASSWD:ALL + format: cloud-config + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: TinkerbellMachineTemplate + name: + replicas: 1 + version: v1.21.2-eks-1-21-4 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: TinkerbellMachineTemplate +metadata: + name: + namespace: eksa-system +spec: + template: + spec: + hardwareAffinity: + required: + - labelSelector: + matchLabels: + type: node + templateOverride: | + global_timeout: 0 + id: "" + name: "" + tasks: null + version: "" + +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: TinkerbellCluster +metadata: + name: test + namespace: eksa-system +spec: + imageLookupFormat: --kube-v1.21.2-eks-1-21-4.raw.gz + imageLookupBaseRegistry: / \ No newline at end of file diff --git a/pkg/providers/vsphere/config/template-cp.yaml b/pkg/providers/vsphere/config/template-cp.yaml index e29a7cf7311f5..b8c7aec986b4c 100644 --- 
a/pkg/providers/vsphere/config/template-cp.yaml +++ b/pkg/providers/vsphere/config/template-cp.yaml @@ -146,6 +146,10 @@ spec: {{- end }} {{- end}} apiServer: + {{- with .apiServerCertSANs }} + certSANs: + {{- toYaml . | nindent 8 }} + {{- end }} extraArgs: cloud-provider: external audit-policy-file: /etc/kubernetes/audit-policy.yaml diff --git a/pkg/providers/vsphere/template.go b/pkg/providers/vsphere/template.go index fceff21948759..8326ba48263aa 100644 --- a/pkg/providers/vsphere/template.go +++ b/pkg/providers/vsphere/template.go @@ -151,6 +151,7 @@ func buildTemplateMapCP( "clusterName": clusterSpec.Cluster.Name, "controlPlaneEndpointIp": clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Endpoint.Host, "controlPlaneReplicas": clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Count, + "apiServerCertSANs": clusterSpec.Cluster.Spec.ControlPlaneConfiguration.CertSANs, "kubernetesRepository": versionsBundle.KubeDistro.Kubernetes.Repository, "kubernetesVersion": versionsBundle.KubeDistro.Kubernetes.Tag, "etcdRepository": versionsBundle.KubeDistro.Etcd.Repository, diff --git a/pkg/providers/vsphere/template_test.go b/pkg/providers/vsphere/template_test.go index 9415f73b0053c..5443d40829a96 100644 --- a/pkg/providers/vsphere/template_test.go +++ b/pkg/providers/vsphere/template_test.go @@ -5,13 +5,9 @@ import ( "time" . "github.com/onsi/gomega" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/aws/eks-anywhere/internal/test" - v1alpha1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" - "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/providers/vsphere" - "github.com/aws/eks-anywhere/pkg/utils/ptr" ) func TestVsphereTemplateBuilderGenerateCAPISpecWorkersInvalidSSHKey(t *testing.T) { @@ -53,75 +49,32 @@ func TestVsphereTemplateBuilderGenerateCAPISpecControlPlaneInvalidEtcdSSHKey(t * ) } -func invalidSSHKey() string { - return "ssh-rsa AAAA B3NzaC1K73CeQ== testemail@test.com" -} +func TestTemplateBuilder_CertSANs(t *testing.T) { + for _, tc := range []struct { + Input string + Output string + }{ + { + Input: "testdata/cluster_api_server_cert_san_domain_name.yaml", + Output: "testdata/expected_cluster_api_server_cert_san_domain_name.yaml", + }, + { + Input: "testdata/cluster_api_server_cert_san_ip.yaml", + Output: "testdata/expected_cluster_api_server_cert_san_ip.yaml", + }, + } { + g := NewWithT(t) + clusterSpec := test.NewFullClusterSpec(t, tc.Input) + + bldr := vsphere.NewVsphereTemplateBuilder(time.Now) -func vsphereClusterSpec(opts ...test.ClusterSpecOpt) *cluster.Spec { - spec := test.NewClusterSpec(func(s *cluster.Spec) { - s.Cluster.Name = "test-cluster" - s.Cluster.Spec.ControlPlaneConfiguration = v1alpha1.ControlPlaneConfiguration{ - Count: 3, - Endpoint: &v1alpha1.Endpoint{ - Host: "test-ip", - }, - MachineGroupRef: &v1alpha1.Ref{ - Kind: v1alpha1.VSphereMachineConfigKind, - Name: "eksa-unit-test", - }, - } - s.Cluster.Spec.WorkerNodeGroupConfigurations = []v1alpha1.WorkerNodeGroupConfiguration{{ - Name: "md-0", - Count: ptr.Int(3), - MachineGroupRef: &v1alpha1.Ref{ - Kind: v1alpha1.VSphereMachineConfigKind, - Name: "eksa-unit-test", - }, - }} - s.Cluster.Spec.ClusterNetwork = v1alpha1.ClusterNetwork{ - CNIConfig: &v1alpha1.CNIConfig{Cilium: &v1alpha1.CiliumConfig{}}, - Pods: v1alpha1.Pods{ - CidrBlocks: []string{"192.168.0.0/16"}, - }, - Services: v1alpha1.Services{ - CidrBlocks: []string{"10.96.0.0/12"}, - }, - } - s.Cluster.Spec.DatacenterRef = v1alpha1.Ref{ - Kind: v1alpha1.VSphereDatacenterKind, - Name: "eksa-unit-test", - } - 
s.VSphereDatacenter = &v1alpha1.VSphereDatacenterConfig{ - Spec: v1alpha1.VSphereDatacenterConfigSpec{ - Datacenter: "test", - Network: "test", - Server: "test", - }, - } - s.Cluster.Spec.DatacenterRef = v1alpha1.Ref{ - Kind: v1alpha1.VSphereDatacenterKind, - Name: "vsphere test", - } - s.VSphereMachineConfigs = map[string]*v1alpha1.VSphereMachineConfig{ - "eksa-unit-test": { - ObjectMeta: metav1.ObjectMeta{ - Name: "eksa-unit-test", - }, - Spec: v1alpha1.VSphereMachineConfigSpec{ - Users: []v1alpha1.UserConfiguration{ - { - Name: "capv", - SshAuthorizedKeys: []string{"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ=="}, - }, - }, - }, - }, - } - }) + data, err := bldr.GenerateCAPISpecControlPlane(clusterSpec) + g.Expect(err).ToNot(HaveOccurred()) - for _, op := range opts { - op(spec) + test.AssertContentToFile(t, string(data), tc.Output) } +} - return spec +func invalidSSHKey() string { + return "ssh-rsa AAAA B3NzaC1K73CeQ== testemail@test.com" } diff --git a/pkg/providers/vsphere/testdata/cluster_api_server_cert_san_domain_name.yaml b/pkg/providers/vsphere/testdata/cluster_api_server_cert_san_domain_name.yaml new file mode 100644 index 0000000000000..d37d4e6efc7d8 --- /dev/null +++ b/pkg/providers/vsphere/testdata/cluster_api_server_cert_san_domain_name.yaml @@ -0,0 +1,58 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster +metadata: + name: test + namespace: test +spec: + controlPlaneConfiguration: + count: 1 + endpoint: + host: 0.0.0.0 + certSANs: ["foo.bar"] + machineGroupRef: + name: test + kind: VSphereMachineConfig + kubernetesVersion: "1.19" + datacenterRef: + kind: VSphereDatacenterConfig + name: test + clusterNetwork: + cni: "cilium" + pods: + cidrBlocks: + - 192.168.0.0/16 + services: + cidrBlocks: + - 10.96.0.0/12 +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: VSphereMachineConfig +metadata: + name: test + namespace: test +spec: + diskGiB: 25 + cloneMode: linkedClone + datastore: "/SDDC-Datacenter/datastore/WorkloadDatastore" + folder: "/SDDC-Datacenter/vm" + memoryMiB: 8192 + numCPUs: 2 + osFamily: ubuntu + resourcePool: "*/Resources" + template: "/SDDC-Datacenter/vm/Templates/ubuntu-1804-kube-v1.19.6" + users: + - name: capv + sshAuthorizedKeys: + - "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: VSphereDatacenterConfig +metadata: + name: test + namespace: test +spec: + datacenter: "SDDC-Datacenter" + network: "/SDDC-Datacenter/network/sddc-cgw-network-1" + server: "vsphere_server" + thumbprint: "ABCDEFG" + insecure: false diff --git a/pkg/providers/vsphere/testdata/cluster_api_server_cert_san_ip.yaml b/pkg/providers/vsphere/testdata/cluster_api_server_cert_san_ip.yaml new file mode 100644 index 0000000000000..13fd452932337 --- /dev/null +++ b/pkg/providers/vsphere/testdata/cluster_api_server_cert_san_ip.yaml @@ -0,0 +1,58 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster +metadata: + name: test + namespace: test +spec: + controlPlaneConfiguration: + count: 1 + endpoint: + host: 0.0.0.0 + certSANs: ["11.11.11.11"] + machineGroupRef: + name: test + kind: VSphereMachineConfig + kubernetesVersion: "1.19" + datacenterRef: + kind: VSphereDatacenterConfig + name: test + clusterNetwork: + cni: "cilium" + pods: + cidrBlocks: + - 192.168.0.0/16 + services: + cidrBlocks: + - 10.96.0.0/12 +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: VSphereMachineConfig +metadata: + name: test + namespace: test +spec: + diskGiB: 25 + cloneMode: linkedClone + datastore: "/SDDC-Datacenter/datastore/WorkloadDatastore" + folder: "/SDDC-Datacenter/vm" + memoryMiB: 8192 + numCPUs: 2 + osFamily: ubuntu + resourcePool: "*/Resources" + template: "/SDDC-Datacenter/vm/Templates/ubuntu-1804-kube-v1.19.6" + users: + - name: capv + sshAuthorizedKeys: + - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: VSphereDatacenterConfig +metadata: + name: test + namespace: test +spec: + datacenter: "SDDC-Datacenter" + network: "/SDDC-Datacenter/network/sddc-cgw-network-1" + server: "vsphere_server" + thumbprint: "ABCDEFG" + insecure: false diff --git a/pkg/providers/vsphere/testdata/expected_cluster_api_server_cert_san_domain_name.yaml b/pkg/providers/vsphere/testdata/expected_cluster_api_server_cert_san_domain_name.yaml new file mode 100644 index 0000000000000..2bc454912b0e1 
--- /dev/null +++ b/pkg/providers/vsphere/testdata/expected_cluster_api_server_cert_san_domain_name.yaml @@ -0,0 +1,633 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test + namespace: eksa-system +spec: + clusterNetwork: + pods: + cidrBlocks: [192.168.0.0/16] + services: + cidrBlocks: [10.96.0.0/12] + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: test + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereCluster + name: test +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereCluster +metadata: + name: test + namespace: eksa-system +spec: + controlPlaneEndpoint: + host: 0.0.0.0 + port: 6443 + identityRef: + kind: Secret + name: test-vsphere-credentials + server: vsphere_server + thumbprint: 'ABCDEFG' +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachineTemplate +metadata: + name: + namespace: eksa-system +spec: + template: + spec: + cloneMode: linkedClone + datacenter: 'SDDC-Datacenter' + datastore: /SDDC-Datacenter/datastore/WorkloadDatastore + diskGiB: 25 + folder: '/SDDC-Datacenter/vm' + memoryMiB: 8192 + network: + devices: + - dhcp4: true + networkName: /SDDC-Datacenter/network/sddc-cgw-network-1 + numCPUs: 2 + resourcePool: '*/Resources' + server: vsphere_server + template: /SDDC-Datacenter/vm/Templates/ubuntu-1804-kube-v1.19.6 + thumbprint: 'ABCDEFG' +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: test + namespace: eksa-system +spec: + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + name: + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: public.ecr.aws/eks-distro/kubernetes + etcd: + local: + imageRepository: public.ecr.aws/eks-distro/etcd-io + imageTag: v3.4.14-eks-1-19-4 + extraArgs: + cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + dns: + imageRepository: public.ecr.aws/eks-distro/coredns + imageTag: v1.8.0-eks-1-19-4 + apiServer: + certSANs: + - foo.bar + extraArgs: + cloud-provider: external + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-path: /var/log/kubernetes/api-audit.log + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "512" + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + extraVolumes: + - hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + name: audit-policy + pathType: File + readOnly: true + - hostPath: /var/log/kubernetes + mountPath: /var/log/kubernetes + name: audit-log-dir + pathType: DirectoryOrCreate + readOnly: false + controllerManager: + extraArgs: + cloud-provider: external + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + scheduler: + extraArgs: + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - args: + - manager + env: + - name: vip_arp + value: "true" + - name: port + value: "6443" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - 
name: vip_retryperiod + value: "2" + - name: address + value: 0.0.0.0 + image: public.ecr.aws/l0g8r8j6/kube-vip/kube-vip:v0.3.2-2093eaeda5a4567f0e516d652e0b25b1d7abc774 + imagePullPolicy: IfNotPresent + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + name: kubeconfig + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + - content: | + apiVersion: audit.k8s.io/v1beta1 + kind: Policy + rules: + # Log aws-auth configmap changes + - level: RequestResponse + namespaces: ["kube-system"] + verbs: ["update", "patch", "delete"] + resources: + - group: "" # core + resources: ["configmaps"] + resourceNames: ["aws-auth"] + omitStages: + - "RequestReceived" + # The following requests were manually identified as high-volume and low-risk, + # so drop them. + - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + # Don't log HPA fetching metrics. + - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. + - level: None + resources: + - group: "" # core + resources: ["events"] + # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes + - level: Request + users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + - level: Request + userGroups: ["system:nodes"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + # deletecollection calls can be large, don't log responses for expected namespace deletions + - level: Request + users: ["system:serviceaccount:kube-system:namespace-controller"] + verbs: ["deletecollection"] + omitStages: + - "RequestReceived" + # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. + - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + - level: Request + resources: + - group: "" + resources: ["serviceaccounts/token"] + # Get repsonses can be large; skip them. 
+ - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. + - level: Metadata + omitStages: + - "RequestReceived" + owner: root:root + path: /etc/kubernetes/audit-policy.yaml + initConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: '{{ ds.meta_data.hostname }}' + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: '{{ ds.meta_data.hostname }}' + preKubeadmCommands: + - hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts + - echo "127.0.0.1 localhost" >>/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >>/etc/hosts + - echo "{{ ds.meta_data.hostname }}" >/etc/hostname + useExperimentalRetryJoin: true + users: + - name: capv + sshAuthorizedKeys: + - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ==' + sudo: ALL=(ALL) NOPASSWD:ALL + format: cloud-config + replicas: 1 + version: v1.19.8-eks-1-19-4 +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test-cpi + namespace: eksa-system +spec: + strategy: Reconcile + clusterSelector: + matchLabels: + cluster.x-k8s.io/cluster-name: test + resources: + - kind: Secret + name: test-cloud-controller-manager + - kind: Secret + name: 
test-cloud-provider-vsphere-credentials + - kind: ConfigMap + name: test-cpi-manifests +--- +apiVersion: v1 +kind: Secret +metadata: + name: test-vsphere-credentials + namespace: eksa-system + labels: + clusterctl.cluster.x-k8s.io/move: "true" +stringData: + username: "" + password: "" +--- +apiVersion: v1 +kind: Secret +metadata: + name: test-cloud-controller-manager + namespace: eksa-system +stringData: + data: | + apiVersion: v1 + kind: ServiceAccount + metadata: + name: cloud-controller-manager + namespace: kube-system +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +kind: Secret +metadata: + name: test-cloud-provider-vsphere-credentials + namespace: eksa-system +stringData: + data: | + apiVersion: v1 + kind: Secret + metadata: + name: cloud-provider-vsphere-credentials + namespace: kube-system + stringData: + vsphere_server.password: "" + vsphere_server.username: "" + type: Opaque +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +data: + data: | + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: system:cloud-controller-manager + rules: + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - '*' + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - services + verbs: + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - watch + - list + - delete + - update + - create + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: system:cloud-controller-manager + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - kind: User + name: cloud-controller-manager + --- + apiVersion: v1 + data: + vsphere.conf: | + global: + secretName: cloud-provider-vsphere-credentials + secretNamespace: kube-system + thumbprint: "ABCDEFG" + insecureFlag: false + vcenter: + vsphere_server: + datacenters: + - 'SDDC-Datacenter' + secretName: cloud-provider-vsphere-credentials + secretNamespace: kube-system + server: 'vsphere_server' + thumbprint: 'ABCDEFG' + kind: ConfigMap + metadata: + name: vsphere-cloud-config + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: servicecatalog.k8s.io:apiserver-authentication-reader + namespace: kube-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - kind: User + name: cloud-controller-manager + --- + apiVersion: v1 + kind: Service + metadata: + labels: + component: cloud-controller-manager + name: cloud-controller-manager + namespace: kube-system + spec: + ports: + - port: 443 + protocol: TCP + targetPort: 43001 + selector: + component: cloud-controller-manager + type: NodePort + --- + 
apiVersion: apps/v1 + kind: DaemonSet + metadata: + labels: + k8s-app: vsphere-cloud-controller-manager + name: vsphere-cloud-controller-manager + namespace: kube-system + spec: + selector: + matchLabels: + k8s-app: vsphere-cloud-controller-manager + template: + metadata: + labels: + k8s-app: vsphere-cloud-controller-manager + spec: + containers: + - args: + - --v=2 + - --cloud-provider=vsphere + - --cloud-config=/etc/cloud/vsphere.conf + image: public.ecr.aws/l0g8r8j6/kubernetes/cloud-provider-vsphere/cpi/manager:v1.18.1-2093eaeda5a4567f0e516d652e0b25b1d7abc774 + name: vsphere-cloud-controller-manager + resources: + requests: + cpu: 200m + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + hostNetwork: true + serviceAccountName: cloud-controller-manager + tolerations: + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + - effect: NoSchedule + key: node-role.kubernetes.io/master + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + - effect: NoSchedule + key: node.kubernetes.io/not-ready + volumes: + - configMap: + name: vsphere-cloud-config + name: vsphere-config-volume + updateStrategy: + type: RollingUpdate +kind: ConfigMap +metadata: + name: test-cpi-manifests + namespace: eksa-system diff --git a/pkg/providers/vsphere/testdata/expected_cluster_api_server_cert_san_ip.yaml b/pkg/providers/vsphere/testdata/expected_cluster_api_server_cert_san_ip.yaml new file mode 100644 index 0000000000000..58004602c476b --- /dev/null +++ b/pkg/providers/vsphere/testdata/expected_cluster_api_server_cert_san_ip.yaml @@ -0,0 +1,633 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test + namespace: eksa-system +spec: + clusterNetwork: + pods: + cidrBlocks: [192.168.0.0/16] + services: + cidrBlocks: [10.96.0.0/12] + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: test + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereCluster + name: test +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereCluster +metadata: + name: test + namespace: eksa-system +spec: + controlPlaneEndpoint: + host: 0.0.0.0 + port: 6443 + identityRef: + kind: Secret + name: test-vsphere-credentials + server: vsphere_server + thumbprint: 'ABCDEFG' +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachineTemplate +metadata: + name: + namespace: eksa-system +spec: + template: + spec: + cloneMode: linkedClone + datacenter: 'SDDC-Datacenter' + datastore: /SDDC-Datacenter/datastore/WorkloadDatastore + diskGiB: 25 + folder: '/SDDC-Datacenter/vm' + memoryMiB: 8192 + network: + devices: + - dhcp4: true + networkName: /SDDC-Datacenter/network/sddc-cgw-network-1 + numCPUs: 2 + resourcePool: '*/Resources' + server: vsphere_server + template: /SDDC-Datacenter/vm/Templates/ubuntu-1804-kube-v1.19.6 + thumbprint: 'ABCDEFG' +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: test + namespace: eksa-system +spec: + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + name: + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: public.ecr.aws/eks-distro/kubernetes + etcd: + local: + imageRepository: public.ecr.aws/eks-distro/etcd-io + imageTag: v3.4.14-eks-1-19-4 + extraArgs: + cipher-suites: 
TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + dns: + imageRepository: public.ecr.aws/eks-distro/coredns + imageTag: v1.8.0-eks-1-19-4 + apiServer: + certSANs: + - 11.11.11.11 + extraArgs: + cloud-provider: external + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-path: /var/log/kubernetes/api-audit.log + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "512" + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + extraVolumes: + - hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + name: audit-policy + pathType: File + readOnly: true + - hostPath: /var/log/kubernetes + mountPath: /var/log/kubernetes + name: audit-log-dir + pathType: DirectoryOrCreate + readOnly: false + controllerManager: + extraArgs: + cloud-provider: external + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + scheduler: + extraArgs: + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - args: + - manager + env: + - name: vip_arp + value: "true" + - name: port + value: "6443" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: address + value: 0.0.0.0 + image: public.ecr.aws/l0g8r8j6/kube-vip/kube-vip:v0.3.2-2093eaeda5a4567f0e516d652e0b25b1d7abc774 + imagePullPolicy: IfNotPresent + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + name: kubeconfig + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + - content: | + apiVersion: audit.k8s.io/v1beta1 + kind: Policy + rules: + # Log aws-auth configmap changes + - level: RequestResponse + namespaces: ["kube-system"] + verbs: ["update", "patch", "delete"] + resources: + - group: "" # core + resources: ["configmaps"] + resourceNames: ["aws-auth"] + omitStages: + - "RequestReceived" + # The following requests were manually identified as high-volume and low-risk, + # so drop them. + - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + # Don't log HPA fetching metrics. 
+ - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. + - level: None + resources: + - group: "" # core + resources: ["events"] + # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes + - level: Request + users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + - level: Request + userGroups: ["system:nodes"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + # deletecollection calls can be large, don't log responses for expected namespace deletions + - level: Request + users: ["system:serviceaccount:kube-system:namespace-controller"] + verbs: ["deletecollection"] + omitStages: + - "RequestReceived" + # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. + - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + - level: Request + resources: + - group: "" + resources: ["serviceaccounts/token"] + # Get repsonses can be large; skip them. + - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. 
+ - level: Metadata + omitStages: + - "RequestReceived" + owner: root:root + path: /etc/kubernetes/audit-policy.yaml + initConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: '{{ ds.meta_data.hostname }}' + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: '{{ ds.meta_data.hostname }}' + preKubeadmCommands: + - hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts + - echo "127.0.0.1 localhost" >>/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >>/etc/hosts + - echo "{{ ds.meta_data.hostname }}" >/etc/hostname + useExperimentalRetryJoin: true + users: + - name: capv + sshAuthorizedKeys: + - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ==' + sudo: ALL=(ALL) NOPASSWD:ALL + format: cloud-config + replicas: 1 + version: v1.19.8-eks-1-19-4 +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test-cpi + namespace: eksa-system +spec: + strategy: Reconcile + clusterSelector: + matchLabels: + cluster.x-k8s.io/cluster-name: test + resources: + - kind: Secret + name: test-cloud-controller-manager + - kind: Secret + name: test-cloud-provider-vsphere-credentials + - kind: ConfigMap + name: test-cpi-manifests +--- +apiVersion: v1 +kind: Secret +metadata: + name: test-vsphere-credentials + namespace: eksa-system + labels: + clusterctl.cluster.x-k8s.io/move: "true" +stringData: + username: "" + password: "" +--- +apiVersion: v1 +kind: Secret +metadata: + name: test-cloud-controller-manager + namespace: eksa-system +stringData: + data: | + apiVersion: v1 + kind: ServiceAccount + metadata: + name: cloud-controller-manager + namespace: kube-system +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +kind: Secret +metadata: + name: test-cloud-provider-vsphere-credentials + namespace: eksa-system +stringData: + data: | + apiVersion: v1 + kind: Secret + metadata: + name: cloud-provider-vsphere-credentials + namespace: kube-system + stringData: + vsphere_server.password: "" + vsphere_server.username: "" + type: Opaque +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +data: + data: | + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: system:cloud-controller-manager + rules: + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - '*' + - apiGroups: + - "" + resources: + 
- nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - services + verbs: + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - watch + - list + - delete + - update + - create + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: system:cloud-controller-manager + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - kind: User + name: cloud-controller-manager + --- + apiVersion: v1 + data: + vsphere.conf: | + global: + secretName: cloud-provider-vsphere-credentials + secretNamespace: kube-system + thumbprint: "ABCDEFG" + insecureFlag: false + vcenter: + vsphere_server: + datacenters: + - 'SDDC-Datacenter' + secretName: cloud-provider-vsphere-credentials + secretNamespace: kube-system + server: 'vsphere_server' + thumbprint: 'ABCDEFG' + kind: ConfigMap + metadata: + name: vsphere-cloud-config + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: servicecatalog.k8s.io:apiserver-authentication-reader + namespace: kube-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - kind: User + name: cloud-controller-manager + --- + apiVersion: v1 + kind: Service + metadata: + labels: + component: cloud-controller-manager + name: cloud-controller-manager + namespace: kube-system + spec: + ports: + - port: 443 + protocol: TCP + targetPort: 43001 + selector: + component: cloud-controller-manager + type: NodePort + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + labels: + k8s-app: vsphere-cloud-controller-manager + name: vsphere-cloud-controller-manager + namespace: kube-system + spec: + selector: + matchLabels: + k8s-app: vsphere-cloud-controller-manager + template: + metadata: + labels: + k8s-app: vsphere-cloud-controller-manager + spec: + containers: + - args: + - --v=2 + - --cloud-provider=vsphere + - --cloud-config=/etc/cloud/vsphere.conf + image: public.ecr.aws/l0g8r8j6/kubernetes/cloud-provider-vsphere/cpi/manager:v1.18.1-2093eaeda5a4567f0e516d652e0b25b1d7abc774 + name: vsphere-cloud-controller-manager + resources: + requests: + cpu: 200m + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + hostNetwork: true + serviceAccountName: cloud-controller-manager + tolerations: + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + - effect: NoSchedule + key: node-role.kubernetes.io/master + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + - effect: NoSchedule + key: node.kubernetes.io/not-ready + volumes: + - configMap: + name: vsphere-cloud-config + name: vsphere-config-volume + updateStrategy: + type: RollingUpdate +kind: ConfigMap +metadata: + name: test-cpi-manifests + namespace: eksa-system diff --git 
a/pkg/templater/templater.go b/pkg/templater/templater.go index afcd82e6f676b..53d0b8d91ecd0 100644 --- a/pkg/templater/templater.go +++ b/pkg/templater/templater.go @@ -6,7 +6,9 @@ import ( "strings" "text/template" + "github.com/Masterminds/sprig" "github.com/aws/eks-anywhere/pkg/filewriter" + "sigs.k8s.io/yaml" ) type Templater struct { @@ -42,25 +44,34 @@ func (t *Templater) WriteBytesToFile(content []byte, fileName string, f ...filew } func Execute(templateContent string, data interface{}) ([]byte, error) { - temp := template.New("tmpl") - funcMap := map[string]interface{}{ - "indent": func(spaces int, v string) string { - pad := strings.Repeat(" ", spaces) - return pad + strings.Replace(v, "\n", "\n"+pad, -1) - }, + // Apply sprig functions for easy templating. + // See https://masterminds.github.io/sprig/ for a list of available functions. + fns := sprig.TxtFuncMap() + for k, v := range map[string]any{ "stringsJoin": strings.Join, + "toYaml": toYAML, + } { + fns[k] = v } - temp = temp.Funcs(funcMap) - temp, err := temp.Parse(templateContent) + tpl := template.New("").Funcs(fns) + tpl, err := tpl.Parse(templateContent) if err != nil { return nil, fmt.Errorf("parsing template: %v", err) } var buf bytes.Buffer - err = temp.Execute(&buf, data) - if err != nil { + if err := tpl.Execute(&buf, data); err != nil { return nil, fmt.Errorf("substituting values for template: %v", err) } + return buf.Bytes(), nil } + +func toYAML(v any) string { + data, err := yaml.Marshal(v) + if err != nil { + return "" + } + return strings.TrimSuffix(string(data), "\n") +}
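For reference, the reworked Execute can be exercised directly once this change is in place. The snippet below is a minimal, illustrative sketch and is not part of the patch: the template text and the data map are made up, and exist only to show the new toYaml helper combined with sprig's indent when rendering a list such as the API server cert SANs.

package main

import (
	"fmt"

	"github.com/aws/eks-anywhere/pkg/templater"
)

func main() {
	// Hypothetical input data; the 11.11.11.11 value mirrors the SAN used in the
	// expected testdata above, the domain name is a placeholder.
	data := map[string]interface{}{
		"certSANs": []string{"11.11.11.11", "api.example.com"},
	}

	// Hypothetical template: toYaml is the helper added in this patch, indent
	// comes from the sprig function map now wired into Execute.
	tmpl := `apiServer:
{{- if .certSANs }}
  certSANs:
{{ toYaml .certSANs | indent 2 }}
{{- end }}`

	out, err := templater.Execute(tmpl, data)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}

Under those assumptions the sketch prints an apiServer block whose certSANs entries match the shape of the expected_cluster_api_server_cert_san_ip.yaml output above; the same toYaml/indent combination is what lets provider templates emit the list without hand-rolled padding.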