diff --git a/config/crd/bases/anywhere.eks.amazonaws.com_clusters.yaml b/config/crd/bases/anywhere.eks.amazonaws.com_clusters.yaml
index 92b773ab2c1f..001e55b87de9 100644
--- a/config/crd/bases/anywhere.eks.amazonaws.com_clusters.yaml
+++ b/config/crd/bases/anywhere.eks.amazonaws.com_clusters.yaml
@@ -124,6 +124,12 @@ spec:
               type: object
             controlPlaneConfiguration:
               properties:
+                certSans:
+                  description: CertSANs is a slice of domain names or IPs to be
+                    added as Subject Alternative Names of the Kube API Server certificate.
+                  items:
+                    type: string
+                  type: array
                 count:
                   description: Count defines the number of desired control plane
                     nodes. Defaults to 1.
diff --git a/pkg/api/v1alpha1/cluster.go b/pkg/api/v1alpha1/cluster.go
index 6fcc04fdf2c3..b642d37263b2 100644
--- a/pkg/api/v1alpha1/cluster.go
+++ b/pkg/api/v1alpha1/cluster.go
@@ -188,6 +188,7 @@ var clusterConfigValidations = []func(*Cluster) error{
 	validateControlPlaneLabels,
 	validatePackageControllerConfiguration,
 	validateEksaVersion,
+	validateControlPlaneCertSANs,
 }
 
 // GetClusterConfig parses a Cluster object from a multiobject yaml file in disk
@@ -419,6 +420,26 @@ func validateControlPlaneEndpoint(clusterConfig *Cluster) error {
 	return nil
 }
 
+var domainNameRegex = regexp.MustCompile(`^(?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\.)+[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$`)
+
+func validateControlPlaneCertSANs(cfg *Cluster) error {
+	var invalid []string
+	for _, san := range cfg.Spec.ControlPlaneConfiguration.CertSANs {
+		isDomain := domainNameRegex.MatchString(san)
+		ip := net.ParseIP(san)
+
+		if !isDomain && ip == nil {
+			invalid = append(invalid, san)
+		}
+	}
+
+	if len(invalid) > 0 {
+		return fmt.Errorf("invalid ControlPlaneConfiguration.CertSANs; must be an IP or domain name: [%v]", strings.Join(invalid, ", "))
+	}
+
+	return nil
+}
+
 func validateWorkerNodeGroups(clusterConfig *Cluster) error {
 	workerNodeGroupConfigs := clusterConfig.Spec.WorkerNodeGroupConfigurations
 	if len(workerNodeGroupConfigs) <= 0 {
diff --git a/pkg/api/v1alpha1/cluster_types.go b/pkg/api/v1alpha1/cluster_types.go
index 5dfc62be1222..9bf87590c867 100644
--- a/pkg/api/v1alpha1/cluster_types.go
+++ b/pkg/api/v1alpha1/cluster_types.go
@@ -282,6 +282,9 @@ type ControlPlaneConfiguration struct {
 	// SkipLoadBalancerDeployment skip deploying control plane load balancer.
 	// Make sure your infrastructure can handle control plane load balancing when you set this field to true.
 	SkipLoadBalancerDeployment bool `json:"skipLoadBalancerDeployment,omitempty"`
+	// CertSANs is a slice of domain names or IPs to be added as Subject Alternative Names of the
+	// Kube API Server certificate.
+	CertSANs []string `json:"certSans,omitempty"`
 }
 
 // MachineHealthCheck allows to configure timeouts for machine health checks. Machine Health Checks are responsible for remediating unhealthy Machines.
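Reviewer note on the validation above: a SAN is accepted when net.ParseIP succeeds or the anchored domain-name pattern matches the whole string. A minimal, self-contained sketch of that per-entry check (the sample inputs are the same ones exercised by the unit tests below; the helper name validCertSAN is hypothetical, not part of the diff):

```go
package main

import (
	"fmt"
	"net"
	"regexp"
)

// Same anchored pattern as domainNameRegex in pkg/api/v1alpha1/cluster.go;
// anchoring means a SAN must be a well-formed domain in its entirety,
// not merely contain one.
var domainName = regexp.MustCompile(`^(?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\.)+[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$`)

// validCertSAN mirrors the per-entry check in validateControlPlaneCertSANs:
// accept an IP address or a full domain name, reject everything else.
func validCertSAN(san string) bool {
	return net.ParseIP(san) != nil || domainName.MatchString(san)
}

func main() {
	for _, san := range []string{"domain.com", "11.11.11.11", "domain%com", ""} {
		fmt.Printf("%q valid=%v\n", san, validCertSAN(san))
	}
	// Output:
	// "domain.com" valid=true
	// "11.11.11.11" valid=true
	// "domain%com" valid=false
	// "" valid=false
}
```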
@@ -335,7 +338,8 @@ func (n *ControlPlaneConfiguration) Equal(o *ControlPlaneConfiguration) bool {
 		return false
 	}
 	return n.Count == o.Count && n.MachineGroupRef.Equal(o.MachineGroupRef) &&
-		TaintsSliceEqual(n.Taints, o.Taints) && MapEqual(n.Labels, o.Labels)
+		TaintsSliceEqual(n.Taints, o.Taints) && MapEqual(n.Labels, o.Labels) &&
+		SliceEqual(n.CertSANs, o.CertSANs)
 }
 
 type Endpoint struct {
diff --git a/pkg/api/v1alpha1/cluster_types_test.go b/pkg/api/v1alpha1/cluster_types_test.go
index a63637006878..cd8687ea6a34 100644
--- a/pkg/api/v1alpha1/cluster_types_test.go
+++ b/pkg/api/v1alpha1/cluster_types_test.go
@@ -3169,3 +3169,64 @@ func TestCNIConfigIsManaged(t *testing.T) {
 		})
 	}
 }
+
+func TestValidateCluster(t *testing.T) {
+	for _, tc := range []struct {
+		Name           string
+		Cluster        *v1alpha1.Cluster
+		ExpectContains []string
+	}{
+		{
+			Name: "APIServerCertSAN_DomainName",
+			Cluster: baseCluster(func(c *v1alpha1.Cluster) {
+				c.Spec.ControlPlaneConfiguration.CertSANs = []string{"domain.com"}
+			}),
+		},
+		{
+			Name: "APIServerCertSAN_InvalidDomainName",
+			Cluster: baseCluster(func(c *v1alpha1.Cluster) {
+				c.Spec.ControlPlaneConfiguration.CertSANs = []string{"domain%com"}
+			}),
+			ExpectContains: []string{"domain%com"},
+		},
+		{
+			Name: "APIServerCertSAN_IP",
+			Cluster: baseCluster(func(c *v1alpha1.Cluster) {
+				c.Spec.ControlPlaneConfiguration.CertSANs = []string{"11.11.11.11"}
+			}),
+		},
+		{
+			Name: "APIServerCertSAN_Empty",
+			Cluster: baseCluster(func(c *v1alpha1.Cluster) {
+				c.Spec.ControlPlaneConfiguration.CertSANs = []string{""}
+			}),
+			ExpectContains: []string{"invalid ControlPlaneConfiguration.CertSANs"},
+		},
+		{
+			Name: "APIServerCertSAN_Multi",
+			Cluster: baseCluster(func(c *v1alpha1.Cluster) {
+				c.Spec.ControlPlaneConfiguration.CertSANs = []string{"11.11.11.11", "domain.com"}
+			}),
+		},
+		{
+			Name: "APIServerCertSAN_MultiWithInvalidDomain",
+			Cluster: baseCluster(func(c *v1alpha1.Cluster) {
+				c.Spec.ControlPlaneConfiguration.CertSANs = []string{"11.11.11.11", "domain%com"}
+			}),
+			ExpectContains: []string{"domain%com"},
+		},
+	} {
+		t.Run(tc.Name, func(t *testing.T) {
+			g := NewWithT(t)
+			err := tc.Cluster.Validate()
+			if len(tc.ExpectContains) > 0 {
+				g.Expect(err).To(HaveOccurred())
+				for _, str := range tc.ExpectContains {
+					g.Expect(err.Error()).To(ContainSubstring(str))
+				}
+			} else {
+				g.Expect(err).To(Succeed())
+			}
+		})
+	}
+}
diff --git a/pkg/api/v1alpha1/zz_generated.deepcopy.go b/pkg/api/v1alpha1/zz_generated.deepcopy.go
index 66ed81d60bd3..9cbce5238b06 100644
--- a/pkg/api/v1alpha1/zz_generated.deepcopy.go
+++ b/pkg/api/v1alpha1/zz_generated.deepcopy.go
@@ -867,6 +867,11 @@ func (in *ControlPlaneConfiguration) DeepCopyInto(out *ControlPlaneConfiguration
 		*out = new(ControlPlaneUpgradeRolloutStrategy)
 		**out = **in
 	}
+	if in.CertSANs != nil {
+		in, out := &in.CertSANs, &out.CertSANs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneConfiguration.
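A small usage sketch (not part of the change) of the extended Equal; it assumes, per the package's existing helpers, that nil MachineGroupRef, Taints, and Labels compare equal, so the only difference between the two values below is the CertSANs slice:

```go
package main

import (
	"fmt"

	"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)

func main() {
	a := v1alpha1.ControlPlaneConfiguration{Count: 3, CertSANs: []string{"foo.bar"}}
	b := v1alpha1.ControlPlaneConfiguration{Count: 3, CertSANs: []string{"foo.bar", "11.11.11.11"}}

	// With SliceEqual wired into Equal, a changed certSans list now
	// registers as a spec difference instead of being silently ignored.
	fmt.Println(a.Equal(&b)) // false
	fmt.Println(a.Equal(&a)) // true
}
```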
diff --git a/pkg/clusterapi/apibuilder.go b/pkg/clusterapi/apibuilder.go index 4b3f27ac9071..d4340e73023f 100644 --- a/pkg/clusterapi/apibuilder.go +++ b/pkg/clusterapi/apibuilder.go @@ -159,6 +159,7 @@ func KubeadmControlPlane(clusterSpec *cluster.Spec, infrastructureObject APIObje ExtraArgs: map[string]string{}, ExtraVolumes: []bootstrapv1.HostPathMount{}, }, + CertSANs: clusterSpec.Cluster.Spec.ControlPlaneConfiguration.CertSANs, }, ControllerManager: bootstrapv1.ControlPlaneComponent{ ExtraArgs: ControllerManagerArgs(clusterSpec), @@ -222,6 +223,7 @@ func KubeadmConfigTemplate(clusterSpec *cluster.Spec, workerNodeGroupConfig anyw ControlPlaneComponent: bootstrapv1.ControlPlaneComponent{ ExtraArgs: map[string]string{}, }, + CertSANs: clusterSpec.Cluster.Spec.ControlPlaneConfiguration.CertSANs, }, }, JoinConfiguration: &bootstrapv1.JoinConfiguration{ diff --git a/pkg/clusterapi/apibuilder_test.go b/pkg/clusterapi/apibuilder_test.go index bcba2a2a13e0..3a50210f3de1 100644 --- a/pkg/clusterapi/apibuilder_test.go +++ b/pkg/clusterapi/apibuilder_test.go @@ -96,6 +96,7 @@ func newApiBuilerTest(t *testing.T) apiBuilerTest { "key1": "val1", "key2": "val2", }, + CertSANs: []string{"foo.bar", "11.11.11.11"}, }, KubernetesVersion: "1.21", }, @@ -311,6 +312,7 @@ func wantKubeadmControlPlane() *controlplanev1.KubeadmControlPlane { ExtraArgs: map[string]string{}, ExtraVolumes: []bootstrapv1.HostPathMount{}, }, + CertSANs: []string{"foo.bar", "11.11.11.11"}, }, ControllerManager: bootstrapv1.ControlPlaneComponent{ ExtraArgs: tlsCipherSuitesArgs(), @@ -392,6 +394,7 @@ func wantKubeadmConfigTemplate() *bootstrapv1.KubeadmConfigTemplate { ControlPlaneComponent: bootstrapv1.ControlPlaneComponent{ ExtraArgs: map[string]string{}, }, + CertSANs: []string{"foo.bar", "11.11.11.11"}, }, }, JoinConfiguration: &bootstrapv1.JoinConfiguration{ diff --git a/pkg/clusterapi/identity_test.go b/pkg/clusterapi/identity_test.go index 12b859de646e..95455c5a4ac3 100644 --- a/pkg/clusterapi/identity_test.go +++ b/pkg/clusterapi/identity_test.go @@ -105,6 +105,7 @@ func TestConfigureAWSIAMAuthInKubeadmControlPlane(t *testing.T) { }, }, }, + CertSANs: []string{"foo.bar", "11.11.11.11"}, }, ControllerManager: bootstrapv1.ControlPlaneComponent{ ExtraArgs: tlsCipherSuitesArgs(), @@ -302,6 +303,7 @@ func TestConfigureOIDCInKubeadmControlPlane(t *testing.T) { }, ExtraVolumes: []bootstrapv1.HostPathMount{}, }, + CertSANs: []string{"foo.bar", "11.11.11.11"}, }, ControllerManager: bootstrapv1.ControlPlaneComponent{ ExtraArgs: tlsCipherSuitesArgs(), @@ -426,6 +428,7 @@ func TestConfigurePodIamAuthInKubeadmControlPlane(t *testing.T) { }, ExtraVolumes: []bootstrapv1.HostPathMount{}, }, + CertSANs: []string{"foo.bar", "11.11.11.11"}, }, ControllerManager: bootstrapv1.ControlPlaneComponent{ ExtraArgs: tlsCipherSuitesArgs(), diff --git a/pkg/providers/cloudstack/config/template-cp.yaml b/pkg/providers/cloudstack/config/template-cp.yaml index bd16a7c79678..9442052dcef0 100644 --- a/pkg/providers/cloudstack/config/template-cp.yaml +++ b/pkg/providers/cloudstack/config/template-cp.yaml @@ -87,6 +87,10 @@ spec: imageRepository: {{.corednsRepository}} imageTag: {{.corednsVersion}} apiServer: + {{- with .apiServerCertSANs }} + certSANs: + {{- toYaml . 
| nindent 8 }} + {{- end }} extraArgs: cloud-provider: external audit-policy-file: /etc/kubernetes/audit-policy.yaml diff --git a/pkg/providers/cloudstack/template.go b/pkg/providers/cloudstack/template.go index f12798f22b72..154387336fe7 100644 --- a/pkg/providers/cloudstack/template.go +++ b/pkg/providers/cloudstack/template.go @@ -143,6 +143,7 @@ func buildTemplateMapCP(clusterSpec *cluster.Spec) (map[string]interface{}, erro "controlPlaneEndpointHost": host, "controlPlaneEndpointPort": port, "controlPlaneReplicas": clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Count, + "apiServerCertSANs": clusterSpec.Cluster.Spec.ControlPlaneConfiguration.CertSANs, "kubernetesRepository": versionsBundle.KubeDistro.Kubernetes.Repository, "kubernetesVersion": versionsBundle.KubeDistro.Kubernetes.Tag, "etcdRepository": versionsBundle.KubeDistro.Etcd.Repository, diff --git a/pkg/providers/cloudstack/template_test.go b/pkg/providers/cloudstack/template_test.go index 3572a2d154d4..76c4043d3715 100644 --- a/pkg/providers/cloudstack/template_test.go +++ b/pkg/providers/cloudstack/template_test.go @@ -141,3 +141,31 @@ func TestTemplateBuilderGenerateCAPISpecWorkersInvalidEndpoint(t *testing.T) { _, err := templateBuilder.GenerateCAPISpecWorkers(clusterSpec, machineTemplateNames, kubeadmConfigTemplateNames) g.Expect(err).To(MatchError(ContainSubstring("building template map for MD host 1.1.1.1:: is invalid: address 1.1.1.1::: too many colons in address"))) } + +func TestTemplateBuilder_CertSANs(t *testing.T) { + for _, tc := range []struct { + Input string + Output string + }{ + { + Input: "testdata/cluster_api_server_cert_san_domain_name.yaml", + Output: "testdata/expected_cluster_api_server_cert_san_domain_name.yaml", + }, + { + Input: "testdata/cluster_api_server_cert_san_ip.yaml", + Output: "testdata/expected_cluster_api_server_cert_san_ip.yaml", + }, + } { + g := NewWithT(t) + clusterSpec := test.NewFullClusterSpec(t, tc.Input) + + bldr := cloudstack.NewTemplateBuilder(time.Now) + + data, err := bldr.GenerateCAPISpecControlPlane(clusterSpec, func(values map[string]interface{}) { + values["controlPlaneTemplateName"] = clusterapi.ControlPlaneMachineTemplateName(clusterSpec.Cluster) + }) + g.Expect(err).ToNot(HaveOccurred()) + + test.AssertContentToFile(t, string(data), tc.Output) + } +} diff --git a/pkg/providers/cloudstack/testdata/cluster_api_server_cert_san_domain_name.yaml b/pkg/providers/cloudstack/testdata/cluster_api_server_cert_san_domain_name.yaml new file mode 100644 index 000000000000..2f89aad24aac --- /dev/null +++ b/pkg/providers/cloudstack/testdata/cluster_api_server_cert_san_domain_name.yaml @@ -0,0 +1,69 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster +metadata: + name: test + namespace: test +spec: + clusterNetwork: + cniConfig: + cilium: {} + pods: + cidrBlocks: + - 192.168.0.0/16 + services: + cidrBlocks: + - 10.96.0.0/12 + controlPlaneConfiguration: + count: 1 + endpoint: + host: 0.0.0.0 + certSANs: ["foo.bar"] + machineGroupRef: + kind: CloudStackMachineConfig + name: test + datacenterRef: + kind: CloudStackDatacenterConfig + name: test + kubernetesVersion: "1.21" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: CloudStackDatacenterConfig +metadata: + name: test + namespace: test +spec: + availabilityZones: + - account: "admin" + domain: "domain1" + name: "default-az-0" + credentialsRef: "global" + zone: + name: "zone1" + network: + name: "net1" + managementApiEndpoint: "http://127.16.0.1:8080/client/api" +--- +apiVersion: 
anywhere.eks.amazonaws.com/v1alpha1 +kind: CloudStackMachineConfig +metadata: + name: test + namespace: test +spec: + computeOffering: + name: "m4-large" + users: + - name: "mySshUsername" + sshAuthorizedKeys: # The key below was manually generated and not used in any production systems + - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com" + template: + name: "kubernetes_1_21" + diskOffering: + name: "Small" + mountPath: "/data-small" + device: "/dev/vdb" + filesystem: "ext4" + label: "data_disk" + symlinks: + /var/log/kubernetes: /data-small/var/log/kubernetes + affinityGroupIds: + - control-plane-anti-affinity \ No newline at end of file diff --git a/pkg/providers/cloudstack/testdata/cluster_api_server_cert_san_ip.yaml b/pkg/providers/cloudstack/testdata/cluster_api_server_cert_san_ip.yaml new file mode 100644 index 000000000000..1e6c951d30a3 --- /dev/null +++ b/pkg/providers/cloudstack/testdata/cluster_api_server_cert_san_ip.yaml @@ -0,0 +1,70 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster +metadata: + name: test + namespace: test +spec: + clusterNetwork: + cniConfig: + cilium: {} + pods: + cidrBlocks: + - 192.168.0.0/16 + services: + cidrBlocks: + - 10.96.0.0/12 + controlPlaneConfiguration: + count: 1 + endpoint: + host: 0.0.0.0 + certSANs: ["11.11.11.11"] + machineGroupRef: + kind: CloudStackMachineConfig + name: test + datacenterRef: + kind: CloudStackDatacenterConfig + name: test + kubernetesVersion: "1.21" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: CloudStackDatacenterConfig +metadata: + name: test + namespace: test +spec: + availabilityZones: + - account: "admin" + domain: "domain1" + name: "default-az-0" + credentialsRef: "global" + zone: + name: "zone1" + network: + name: "net1" + managementApiEndpoint: "http://127.16.0.1:8080/client/api" + +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: CloudStackMachineConfig +metadata: + name: test + namespace: test +spec: + computeOffering: + name: "m4-large" + users: + - name: "mySshUsername" + sshAuthorizedKeys: # The key below was manually generated and not used in any production systems + - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== 
testemail@test.com" + template: + name: "kubernetes_1_21" + diskOffering: + name: "Small" + mountPath: "/data-small" + device: "/dev/vdb" + filesystem: "ext4" + label: "data_disk" + symlinks: + /var/log/kubernetes: /data-small/var/log/kubernetes + affinityGroupIds: + - control-plane-anti-affinity \ No newline at end of file diff --git a/pkg/providers/cloudstack/testdata/expected_cluster_api_server_cert_san_domain_name.yaml b/pkg/providers/cloudstack/testdata/expected_cluster_api_server_cert_san_domain_name.yaml new file mode 100644 index 000000000000..77cd30fe30f5 --- /dev/null +++ b/pkg/providers/cloudstack/testdata/expected_cluster_api_server_cert_san_domain_name.yaml @@ -0,0 +1,406 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test + namespace: eksa-system +spec: + clusterNetwork: + pods: + cidrBlocks: [192.168.0.0/16] + services: + cidrBlocks: [10.96.0.0/12] + controlPlaneEndpoint: + host: 0.0.0.0 + port: 6443 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: test + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta3 + kind: CloudStackCluster + name: test +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta3 +kind: CloudStackCluster +metadata: + name: test + namespace: eksa-system +spec: + controlPlaneEndpoint: + host: 0.0.0.0 + port: 6443 + failureDomains: + - name: default-az-0 + zone: + id: + name: zone1 + network: + id: + name: net1 + domain: domain1 + account: admin + acsEndpoint: + name: global + namespace: eksa-system +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: test + namespace: eksa-system +spec: + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta3 + kind: CloudStackMachineTemplate + name: test-control-plane-1 + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: public.ecr.aws/eks-distro/kubernetes + etcd: + local: + imageRepository: public.ecr.aws/eks-distro/etcd-io + imageTag: v3.4.16-eks-1-21-4 + extraArgs: + cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + dns: + imageRepository: public.ecr.aws/eks-distro/coredns + imageTag: v1.8.3-eks-1-21-4 + apiServer: + certSANs: + - foo.bar + extraArgs: + cloud-provider: external + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-path: /var/log/kubernetes/api-audit.log + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "512" + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + extraVolumes: + - hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + name: audit-policy + pathType: File + readOnly: true + - hostPath: /var/log/kubernetes + mountPath: /var/log/kubernetes + name: audit-log-dir + pathType: DirectoryOrCreate + readOnly: false + controllerManager: + extraArgs: + cloud-provider: external + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + scheduler: + extraArgs: + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - args: + - manager + env: + - name: vip_arp + value: "true" + - name: port + value: "6443" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: 
"false" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: address + value: 0.0.0.0 + image: public.ecr.aws/l0g8r8j6/kube-vip/kube-vip:v0.3.7-eks-a-v0.0.0-dev-build.158 + imagePullPolicy: IfNotPresent + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + name: kubeconfig + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + - content: | + apiVersion: audit.k8s.io/v1beta1 + kind: Policy + rules: + # Log aws-auth configmap changes + - level: RequestResponse + namespaces: ["kube-system"] + verbs: ["update", "patch", "delete"] + resources: + - group: "" # core + resources: ["configmaps"] + resourceNames: ["aws-auth"] + omitStages: + - "RequestReceived" + # The following requests were manually identified as high-volume and low-risk, + # so drop them. + - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + # Don't log HPA fetching metrics. + - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. + - level: None + resources: + - group: "" # core + resources: ["events"] + # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes + - level: Request + users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + - level: Request + userGroups: ["system:nodes"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + # deletecollection calls can be large, don't log responses for expected namespace deletions + - level: Request + users: ["system:serviceaccount:kube-system:namespace-controller"] + verbs: ["deletecollection"] + omitStages: + - "RequestReceived" + # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. 
+ - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + - level: Request + resources: + - group: "" + resources: ["serviceaccounts/token"] + # Get repsonses can be large; skip them. + - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. + - level: Metadata + omitStages: + - "RequestReceived" + owner: root:root + path: /etc/kubernetes/audit-policy.yaml + initConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + provider-id: cloudstack:///'{{ ds.meta_data.instance_id }}' + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: "{{ ds.meta_data.hostname }}" + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + provider-id: cloudstack:///'{{ ds.meta_data.instance_id }}' + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: "{{ ds.meta_data.hostname }}" + preKubeadmCommands: + - swapoff -a + - hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts + - echo "127.0.0.1 localhost" >>/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >>/etc/hosts + - echo "{{ ds.meta_data.hostname }}" >/etc/hostname + - >- + if [ ! 
-L /var/log/kubernetes ] ; + then + mv /var/log/kubernetes /var/log/kubernetes-$(tr -dc A-Za-z0-9 < /dev/urandom | head -c 10) ; + mkdir -p /data-small/var/log/kubernetes && ln -s /data-small/var/log/kubernetes /var/log/kubernetes ; + else echo "/var/log/kubernetes already symlnk"; + fi + diskSetup: + filesystems: + - device: /dev/vdb1 + overwrite: false + extraOpts: + - -E + - lazy_itable_init=1,lazy_journal_init=1 + filesystem: ext4 + label: data_disk + partitions: + - device: /dev/vdb + layout: true + overwrite: false + tableType: gpt + mounts: + - - LABEL=data_disk + - /data-small + useExperimentalRetryJoin: true + users: + - name: mySshUsername + sshAuthorizedKeys: + - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ==' + sudo: ALL=(ALL) NOPASSWD:ALL + format: cloud-config + replicas: 1 + version: v1.21.2-eks-1-21-4 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta3 +kind: CloudStackMachineTemplate +metadata: + annotations: + device.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: /dev/vdb + filesystem.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: ext4 + label.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: data_disk + mountpath.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: /data-small + symlinks.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: /var/log/kubernetes:/data-small/var/log/kubernetes + creationTimestamp: null + name: test-control-plane-1 + namespace: eksa-system +spec: + template: + spec: + affinityGroupIDs: + - control-plane-anti-affinity + diskOffering: + customSizeInGB: 0 + device: /dev/vdb + filesystem: ext4 + label: data_disk + mountPath: /data-small + name: Small + offering: + name: m4-large + sshKey: "" + template: + name: kubernetes_1_21 + +--- diff --git a/pkg/providers/cloudstack/testdata/expected_cluster_api_server_cert_san_ip.yaml b/pkg/providers/cloudstack/testdata/expected_cluster_api_server_cert_san_ip.yaml new file mode 100644 index 000000000000..12cf386ce5a6 --- /dev/null +++ b/pkg/providers/cloudstack/testdata/expected_cluster_api_server_cert_san_ip.yaml @@ -0,0 +1,406 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test + namespace: eksa-system +spec: + clusterNetwork: + pods: + cidrBlocks: [192.168.0.0/16] + services: + cidrBlocks: [10.96.0.0/12] + controlPlaneEndpoint: + host: 0.0.0.0 + port: 6443 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: test + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta3 + kind: CloudStackCluster + name: test +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta3 +kind: CloudStackCluster +metadata: + name: test + namespace: eksa-system +spec: + controlPlaneEndpoint: + host: 0.0.0.0 + port: 6443 + failureDomains: + - name: default-az-0 + zone: + 
id: + name: zone1 + network: + id: + name: net1 + domain: domain1 + account: admin + acsEndpoint: + name: global + namespace: eksa-system +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: test + namespace: eksa-system +spec: + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta3 + kind: CloudStackMachineTemplate + name: test-control-plane-1 + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: public.ecr.aws/eks-distro/kubernetes + etcd: + local: + imageRepository: public.ecr.aws/eks-distro/etcd-io + imageTag: v3.4.16-eks-1-21-4 + extraArgs: + cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + dns: + imageRepository: public.ecr.aws/eks-distro/coredns + imageTag: v1.8.3-eks-1-21-4 + apiServer: + certSANs: + - 11.11.11.11 + extraArgs: + cloud-provider: external + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-path: /var/log/kubernetes/api-audit.log + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "512" + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + extraVolumes: + - hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + name: audit-policy + pathType: File + readOnly: true + - hostPath: /var/log/kubernetes + mountPath: /var/log/kubernetes + name: audit-log-dir + pathType: DirectoryOrCreate + readOnly: false + controllerManager: + extraArgs: + cloud-provider: external + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + scheduler: + extraArgs: + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - args: + - manager + env: + - name: vip_arp + value: "true" + - name: port + value: "6443" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: address + value: 0.0.0.0 + image: public.ecr.aws/l0g8r8j6/kube-vip/kube-vip:v0.3.7-eks-a-v0.0.0-dev-build.158 + imagePullPolicy: IfNotPresent + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + name: kubeconfig + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + - content: | + apiVersion: audit.k8s.io/v1beta1 + kind: Policy + rules: + # Log aws-auth configmap changes + - level: RequestResponse + namespaces: ["kube-system"] + verbs: ["update", "patch", "delete"] + resources: + - group: "" # core + resources: ["configmaps"] + resourceNames: ["aws-auth"] + omitStages: + - "RequestReceived" + # The following requests were manually identified as high-volume and low-risk, + # so drop them. 
+ - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + # Don't log HPA fetching metrics. + - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. + - level: None + resources: + - group: "" # core + resources: ["events"] + # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes + - level: Request + users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + - level: Request + userGroups: ["system:nodes"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + # deletecollection calls can be large, don't log responses for expected namespace deletions + - level: Request + users: ["system:serviceaccount:kube-system:namespace-controller"] + verbs: ["deletecollection"] + omitStages: + - "RequestReceived" + # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. + - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + - level: Request + resources: + - group: "" + resources: ["serviceaccounts/token"] + # Get repsonses can be large; skip them. 
+ - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. + - level: Metadata + omitStages: + - "RequestReceived" + owner: root:root + path: /etc/kubernetes/audit-policy.yaml + initConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + provider-id: cloudstack:///'{{ ds.meta_data.instance_id }}' + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: "{{ ds.meta_data.hostname }}" + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + provider-id: cloudstack:///'{{ ds.meta_data.instance_id }}' + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: "{{ ds.meta_data.hostname }}" + preKubeadmCommands: + - swapoff -a + - hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts + - echo "127.0.0.1 localhost" >>/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >>/etc/hosts + - echo "{{ ds.meta_data.hostname }}" >/etc/hostname + - >- + if [ ! 
-L /var/log/kubernetes ] ; + then + mv /var/log/kubernetes /var/log/kubernetes-$(tr -dc A-Za-z0-9 < /dev/urandom | head -c 10) ; + mkdir -p /data-small/var/log/kubernetes && ln -s /data-small/var/log/kubernetes /var/log/kubernetes ; + else echo "/var/log/kubernetes already symlnk"; + fi + diskSetup: + filesystems: + - device: /dev/vdb1 + overwrite: false + extraOpts: + - -E + - lazy_itable_init=1,lazy_journal_init=1 + filesystem: ext4 + label: data_disk + partitions: + - device: /dev/vdb + layout: true + overwrite: false + tableType: gpt + mounts: + - - LABEL=data_disk + - /data-small + useExperimentalRetryJoin: true + users: + - name: mySshUsername + sshAuthorizedKeys: + - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ==' + sudo: ALL=(ALL) NOPASSWD:ALL + format: cloud-config + replicas: 1 + version: v1.21.2-eks-1-21-4 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta3 +kind: CloudStackMachineTemplate +metadata: + annotations: + device.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: /dev/vdb + filesystem.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: ext4 + label.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: data_disk + mountpath.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: /data-small + symlinks.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: /var/log/kubernetes:/data-small/var/log/kubernetes + creationTimestamp: null + name: test-control-plane-1 + namespace: eksa-system +spec: + template: + spec: + affinityGroupIDs: + - control-plane-anti-affinity + diskOffering: + customSizeInGB: 0 + device: /dev/vdb + filesystem: ext4 + label: data_disk + mountPath: /data-small + name: Small + offering: + name: m4-large + sshKey: "" + template: + name: kubernetes_1_21 + +--- diff --git a/pkg/providers/docker/config/template-cp.yaml b/pkg/providers/docker/config/template-cp.yaml index fed04f28ad27..793051977eb9 100644 --- a/pkg/providers/docker/config/template-cp.yaml +++ b/pkg/providers/docker/config/template-cp.yaml @@ -89,6 +89,9 @@ spec: certSANs: - localhost - 127.0.0.1 + {{- with .apiServerCertSANs }} + {{- toYaml . 
| nindent 8 }} + {{- end }} extraArgs: audit-policy-file: /etc/kubernetes/audit-policy.yaml audit-log-path: /var/log/kubernetes/api-audit.log diff --git a/pkg/providers/docker/docker.go b/pkg/providers/docker/docker.go index 25627a649c26..c4bf36d48bb9 100644 --- a/pkg/providers/docker/docker.go +++ b/pkg/providers/docker/docker.go @@ -277,6 +277,7 @@ func buildTemplateMapCP(clusterSpec *cluster.Spec) (map[string]interface{}, erro "haproxyImageRepository": getHAProxyImageRepo(versionsBundle.Haproxy.Image), "haproxyImageTag": versionsBundle.Haproxy.Image.Tag(), "workerNodeGroupConfigurations": clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations, + "apiServerCertSANs": clusterSpec.Cluster.Spec.ControlPlaneConfiguration.CertSANs, } if clusterSpec.Cluster.Spec.ExternalEtcdConfiguration != nil { diff --git a/pkg/providers/docker/docker_test.go b/pkg/providers/docker/docker_test.go index e1b663ccbbd4..22f7b01dc919 100644 --- a/pkg/providers/docker/docker_test.go +++ b/pkg/providers/docker/docker_test.go @@ -985,3 +985,29 @@ func TestDockerGenerateDeploymentFileWithMirrorAndAuthConfig(t *testing.T) { test.AssertContentToFile(t, string(cp), "testdata/expected_results_mirror_with_auth_config_cp.yaml") test.AssertContentToFile(t, string(md), "testdata/expected_results_mirror_with_auth_config_md.yaml") } + +func TestTemplateBuilder_CertSANs(t *testing.T) { + for _, tc := range []struct { + Input string + Output string + }{ + { + Input: "testdata/cluster_api_server_cert_san_domain_name.yaml", + Output: "testdata/expected_cluster_api_server_cert_san_domain_name.yaml", + }, + { + Input: "testdata/cluster_api_server_cert_san_ip.yaml", + Output: "testdata/expected_cluster_api_server_cert_san_ip.yaml", + }, + } { + g := NewWithT(t) + clusterSpec := test.NewFullClusterSpec(t, tc.Input) + + bldr := docker.NewDockerTemplateBuilder(time.Now) + + data, err := bldr.GenerateCAPISpecControlPlane(clusterSpec) + g.Expect(err).ToNot(HaveOccurred()) + + test.AssertContentToFile(t, string(data), tc.Output) + } +} diff --git a/pkg/providers/docker/testdata/cluster_api_server_cert_san_domain_name.yaml b/pkg/providers/docker/testdata/cluster_api_server_cert_san_domain_name.yaml new file mode 100644 index 000000000000..37e4bd3eda17 --- /dev/null +++ b/pkg/providers/docker/testdata/cluster_api_server_cert_san_domain_name.yaml @@ -0,0 +1,29 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster +metadata: + name: test +spec: + clusterNetwork: + cniConfig: + cilium: {} + pods: + cidrBlocks: + - 192.168.0.0/16 + services: + cidrBlocks: + - 10.96.0.0/12 + controlPlaneConfiguration: + count: 1 + certSANs: ["foo.bar"] + datacenterRef: + kind: DockerDatacenterConfig + name: test + kubernetesVersion: "1.21" + managementCluster: + name: test +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: DockerDatacenterConfig +metadata: + name: test +spec: {} \ No newline at end of file diff --git a/pkg/providers/docker/testdata/cluster_api_server_cert_san_ip.yaml b/pkg/providers/docker/testdata/cluster_api_server_cert_san_ip.yaml new file mode 100644 index 000000000000..471d4596ce0a --- /dev/null +++ b/pkg/providers/docker/testdata/cluster_api_server_cert_san_ip.yaml @@ -0,0 +1,29 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster +metadata: + name: test +spec: + clusterNetwork: + cniConfig: + cilium: {} + pods: + cidrBlocks: + - 192.168.0.0/16 + services: + cidrBlocks: + - 10.96.0.0/12 + controlPlaneConfiguration: + count: 1 + certSANs: ["11.11.11.11"] + datacenterRef: + kind: DockerDatacenterConfig + 
name: test + kubernetesVersion: "1.21" + managementCluster: + name: test +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: DockerDatacenterConfig +metadata: + name: test +spec: {} \ No newline at end of file diff --git a/pkg/providers/docker/testdata/expected_cluster_api_server_cert_san_domain_name.yaml b/pkg/providers/docker/testdata/expected_cluster_api_server_cert_san_domain_name.yaml new file mode 100644 index 000000000000..8e17b0056413 --- /dev/null +++ b/pkg/providers/docker/testdata/expected_cluster_api_server_cert_san_domain_name.yaml @@ -0,0 +1,280 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: test + namespace: eksa-system +spec: + clusterNetwork: + pods: + cidrBlocks: [192.168.0.0/16] + serviceDomain: cluster.local + services: + cidrBlocks: [10.96.0.0/12] + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: test + namespace: eksa-system + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerCluster + name: test + namespace: eksa-system +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerCluster +metadata: + name: test + namespace: eksa-system +spec: + loadBalancer: + imageRepository: + imageTag: +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerMachineTemplate +metadata: + name: + namespace: eksa-system +spec: + template: + spec: + extraMounts: + - containerPath: /var/run/docker.sock + hostPath: /var/run/docker.sock + customImage: public.ecr.aws/l0g8r8j6/kubernetes-sigs/kind/node:v1.21.2-eks-d-1-21-4-eks-a-v0.0.0-dev-build.158 +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: test + namespace: eksa-system +spec: + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachineTemplate + name: + namespace: eksa-system + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: public.ecr.aws/eks-distro/kubernetes + etcd: + local: + imageRepository: public.ecr.aws/eks-distro/etcd-io + imageTag: v3.4.16-eks-1-21-4 + extraArgs: + cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + dns: + imageRepository: public.ecr.aws/eks-distro/coredns + imageTag: v1.8.3-eks-1-21-4 + apiServer: + certSANs: + - localhost + - 127.0.0.1 + - foo.bar + extraArgs: + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-path: /var/log/kubernetes/api-audit.log + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "512" + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + extraVolumes: + - hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + name: audit-policy + pathType: File + readOnly: true + - hostPath: /var/log/kubernetes + mountPath: /var/log/kubernetes + name: audit-log-dir + pathType: DirectoryOrCreate + readOnly: false + controllerManager: + extraArgs: + enable-hostpath-provisioner: "true" + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + scheduler: + extraArgs: + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + files: + - content: | + apiVersion: audit.k8s.io/v1beta1 + kind: Policy + rules: + # Log aws-auth configmap changes + - level: RequestResponse + namespaces: ["kube-system"] + verbs: ["update", "patch", "delete"] + resources: + - group: "" # core + resources: ["configmaps"] + resourceNames: ["aws-auth"] + omitStages: + - "RequestReceived" + # The following 
requests were manually identified as high-volume and low-risk, + # so drop them. + - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + # Don't log HPA fetching metrics. + - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. + - level: None + resources: + - group: "" # core + resources: ["events"] + # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes + - level: Request + users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + - level: Request + userGroups: ["system:nodes"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + # deletecollection calls can be large, don't log responses for expected namespace deletions + - level: Request + users: ["system:serviceaccount:kube-system:namespace-controller"] + verbs: ["deletecollection"] + omitStages: + - "RequestReceived" + # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. + - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + - level: Request + resources: + - group: "" + resources: ["serviceaccounts/token"] + # Get repsonses can be large; skip them. 
+ - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. + - level: Metadata + omitStages: + - "RequestReceived" + owner: root:root + path: /etc/kubernetes/audit-policy.yaml + initConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + cgroup-driver: cgroupfs + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + taints: [] + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + cgroup-driver: cgroupfs + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + taints: [] + replicas: 1 + version: v1.21.2-eks-1-21-4 diff --git a/pkg/providers/docker/testdata/expected_cluster_api_server_cert_san_ip.yaml b/pkg/providers/docker/testdata/expected_cluster_api_server_cert_san_ip.yaml new file mode 100644 index 000000000000..1efeddc8ca66 --- /dev/null +++ b/pkg/providers/docker/testdata/expected_cluster_api_server_cert_san_ip.yaml @@ -0,0 +1,280 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: test + namespace: eksa-system +spec: + clusterNetwork: + pods: + cidrBlocks: [192.168.0.0/16] + serviceDomain: cluster.local + services: + cidrBlocks: [10.96.0.0/12] + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: test + namespace: eksa-system + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerCluster + name: test + namespace: eksa-system +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerCluster +metadata: + name: test + namespace: eksa-system +spec: + loadBalancer: + imageRepository: + imageTag: +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerMachineTemplate +metadata: + name: + namespace: eksa-system +spec: + template: + spec: + extraMounts: + - containerPath: /var/run/docker.sock + hostPath: /var/run/docker.sock + customImage: public.ecr.aws/l0g8r8j6/kubernetes-sigs/kind/node:v1.21.2-eks-d-1-21-4-eks-a-v0.0.0-dev-build.158 +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: test + namespace: 
eksa-system +spec: + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachineTemplate + name: + namespace: eksa-system + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: public.ecr.aws/eks-distro/kubernetes + etcd: + local: + imageRepository: public.ecr.aws/eks-distro/etcd-io + imageTag: v3.4.16-eks-1-21-4 + extraArgs: + cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + dns: + imageRepository: public.ecr.aws/eks-distro/coredns + imageTag: v1.8.3-eks-1-21-4 + apiServer: + certSANs: + - localhost + - 127.0.0.1 + - 11.11.11.11 + extraArgs: + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-path: /var/log/kubernetes/api-audit.log + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "512" + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + extraVolumes: + - hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + name: audit-policy + pathType: File + readOnly: true + - hostPath: /var/log/kubernetes + mountPath: /var/log/kubernetes + name: audit-log-dir + pathType: DirectoryOrCreate + readOnly: false + controllerManager: + extraArgs: + enable-hostpath-provisioner: "true" + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + scheduler: + extraArgs: + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + files: + - content: | + apiVersion: audit.k8s.io/v1beta1 + kind: Policy + rules: + # Log aws-auth configmap changes + - level: RequestResponse + namespaces: ["kube-system"] + verbs: ["update", "patch", "delete"] + resources: + - group: "" # core + resources: ["configmaps"] + resourceNames: ["aws-auth"] + omitStages: + - "RequestReceived" + # The following requests were manually identified as high-volume and low-risk, + # so drop them. + - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + # Don't log HPA fetching metrics. + - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. 
+ - level: None + resources: + - group: "" # core + resources: ["events"] + # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes + - level: Request + users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + - level: Request + userGroups: ["system:nodes"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + # deletecollection calls can be large, don't log responses for expected namespace deletions + - level: Request + users: ["system:serviceaccount:kube-system:namespace-controller"] + verbs: ["deletecollection"] + omitStages: + - "RequestReceived" + # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. + - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + - level: Request + resources: + - group: "" + resources: ["serviceaccounts/token"] + # Get repsonses can be large; skip them. + - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. 
+ - level: Metadata + omitStages: + - "RequestReceived" + owner: root:root + path: /etc/kubernetes/audit-policy.yaml + initConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + cgroup-driver: cgroupfs + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + taints: [] + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + cgroup-driver: cgroupfs + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + taints: [] + replicas: 1 + version: v1.21.2-eks-1-21-4 diff --git a/pkg/providers/nutanix/config/cp-template.yaml b/pkg/providers/nutanix/config/cp-template.yaml index e89baeb888e5..6ec2ffd21064 100644 --- a/pkg/providers/nutanix/config/cp-template.yaml +++ b/pkg/providers/nutanix/config/cp-template.yaml @@ -65,6 +65,9 @@ spec: - localhost - 127.0.0.1 - 0.0.0.0 + {{- with .apiServerCertSANs }} + {{- toYaml . | nindent 10 }} + {{- end }} {{- if .apiServerExtraArgs }} extraArgs: {{ .apiServerExtraArgs.ToYaml | indent 10 }} diff --git a/pkg/providers/nutanix/template.go b/pkg/providers/nutanix/template.go index 5966752a6bd9..8ad9d5fe724d 100644 --- a/pkg/providers/nutanix/template.go +++ b/pkg/providers/nutanix/template.go @@ -200,6 +200,7 @@ func buildTemplateMapCP( "subnetIDType": controlPlaneMachineSpec.Subnet.Type, "subnetName": controlPlaneMachineSpec.Subnet.Name, "subnetUUID": controlPlaneMachineSpec.Subnet.UUID, + "apiServerCertSANs": clusterSpec.Cluster.Spec.ControlPlaneConfiguration.CertSANs, } if clusterSpec.Cluster.Spec.RegistryMirrorConfiguration != nil { diff --git a/pkg/providers/nutanix/template_test.go b/pkg/providers/nutanix/template_test.go index 945a63433891..a4f337cdcabd 100644 --- a/pkg/providers/nutanix/template_test.go +++ b/pkg/providers/nutanix/template_test.go @@ -413,6 +413,35 @@ func TestNewNutanixTemplateBuilderProxy(t *testing.T) { assert.Equal(t, expectedWorkersSpec, workerSpec) } +func TestTemplateBuilder_CertSANs(t *testing.T) { + for _, tc := range []struct { + Input string + Output string + }{ + { + Input: "testdata/cluster_api_server_cert_san_domain_name.yaml", + Output: "testdata/expected_cluster_api_server_cert_san_domain_name.yaml", + }, + { + Input: "testdata/cluster_api_server_cert_san_ip.yaml", + Output: "testdata/expected_cluster_api_server_cert_san_ip.yaml", + }, + } { + clusterSpec := test.NewFullClusterSpec(t, tc.Input) + + machineCfg := clusterSpec.NutanixMachineConfig(clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name) + creds := GetCredsFromEnv() + + bldr := NewNutanixTemplateBuilder(&clusterSpec.NutanixDatacenter.Spec, &machineCfg.Spec, nil, + map[string]anywherev1.NutanixMachineConfigSpec{}, creds, time.Now) + + data, err := bldr.GenerateCAPISpecControlPlane(clusterSpec) + assert.NoError(t, err) + + test.AssertContentToFile(t, string(data), tc.Output) + } +} + func minimalNutanixConfigSpec(t *testing.T) (*anywherev1.NutanixDatacenterConfig, *anywherev1.NutanixMachineConfig, map[string]anywherev1.NutanixMachineConfigSpec) { dcConf := &anywherev1.NutanixDatacenterConfig{} err := yaml.Unmarshal([]byte(nutanixDatacenterConfigSpec), dcConf) diff --git a/pkg/providers/nutanix/testdata/cluster_api_server_cert_san_domain_name.yaml b/pkg/providers/nutanix/testdata/cluster_api_server_cert_san_domain_name.yaml new file mode 100644 index 000000000000..fa51039a4ff0 
--- /dev/null +++ b/pkg/providers/nutanix/testdata/cluster_api_server_cert_san_domain_name.yaml @@ -0,0 +1,64 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster +metadata: + name: test + namespace: default +spec: + kubernetesVersion: "1.19" + controlPlaneConfiguration: + name: test + count: 1 + endpoint: + host: test + certSANs: ["foo.bar"] + machineGroupRef: + name: test + kind: NutanixMachineConfig + datacenterRef: + kind: NutanixDatacenterConfig + name: test + clusterNetwork: + cni: "cilium" + pods: + cidrBlocks: + - 192.168.0.0/16 + services: + cidrBlocks: + - 10.96.0.0/12 +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixDatacenterConfig +metadata: + name: test + namespace: default +spec: + endpoint: "prism.nutanix.com" + port: 9440 + credentialRef: + kind: Secret + name: "nutanix-credentials" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixMachineConfig +metadata: + name: test + namespace: default +spec: + vcpusPerSocket: 1 + vcpuSockets: 4 + memorySize: 8Gi + image: + type: "name" + name: "prism-image-1-19" + cluster: + type: "name" + name: "prism-cluster" + subnet: + type: "name" + name: "prism-subnet" + systemDiskSize: 40Gi + osFamily: "ubuntu" + users: + - name: "mySshUsername" + sshAuthorizedKeys: + - "mySshAuthorizedKey" diff --git a/pkg/providers/nutanix/testdata/cluster_api_server_cert_san_ip.yaml b/pkg/providers/nutanix/testdata/cluster_api_server_cert_san_ip.yaml new file mode 100644 index 000000000000..ab6483f57288 --- /dev/null +++ b/pkg/providers/nutanix/testdata/cluster_api_server_cert_san_ip.yaml @@ -0,0 +1,64 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster +metadata: + name: test + namespace: default +spec: + kubernetesVersion: "1.19" + controlPlaneConfiguration: + name: test + count: 1 + endpoint: + host: test + certSANs: ["11.11.11.11"] + machineGroupRef: + name: test + kind: NutanixMachineConfig + datacenterRef: + kind: NutanixDatacenterConfig + name: test + clusterNetwork: + cni: "cilium" + pods: + cidrBlocks: + - 192.168.0.0/16 + services: + cidrBlocks: + - 10.96.0.0/12 +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixDatacenterConfig +metadata: + name: test + namespace: default +spec: + endpoint: "prism.nutanix.com" + port: 9440 + credentialRef: + kind: Secret + name: "nutanix-credentials" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixMachineConfig +metadata: + name: test + namespace: default +spec: + vcpusPerSocket: 1 + vcpuSockets: 4 + memorySize: 8Gi + image: + type: "name" + name: "prism-image-1-19" + cluster: + type: "name" + name: "prism-cluster" + subnet: + type: "name" + name: "prism-subnet" + systemDiskSize: 40Gi + osFamily: "ubuntu" + users: + - name: "mySshUsername" + sshAuthorizedKeys: + - "mySshAuthorizedKey" diff --git a/pkg/providers/nutanix/testdata/expected_cluster_api_server_cert_san_domain_name.yaml b/pkg/providers/nutanix/testdata/expected_cluster_api_server_cert_san_domain_name.yaml new file mode 100644 index 000000000000..599e73371291 --- /dev/null +++ b/pkg/providers/nutanix/testdata/expected_cluster_api_server_cert_san_domain_name.yaml @@ -0,0 +1,189 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixCluster +metadata: + name: "test" + namespace: "eksa-system" +spec: + prismCentral: + address: "prism.nutanix.com" + port: 9440 + insecure: false + credentialRef: + name: "capx-test" + kind: Secret + controlPlaneEndpoint: + host: "test" + port: 6443 +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster 
+metadata: + labels: + cluster.x-k8s.io/cluster-name: "test" + name: "test" + namespace: "eksa-system" +spec: + clusterNetwork: + services: + cidrBlocks: [10.96.0.0/12] + pods: + cidrBlocks: [192.168.0.0/16] + serviceDomain: "cluster.local" + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: "test" + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixCluster + name: "test" +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: "test" + namespace: "eksa-system" +spec: + replicas: 1 + version: "v1.19.8-eks-1-19-4" + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixMachineTemplate + name: "" + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: "public.ecr.aws/eks-distro/kubernetes" + apiServer: + certSANs: + - localhost + - 127.0.0.1 + - 0.0.0.0 + - foo.bar + controllerManager: + extraArgs: + enable-hostpath-provisioner: "true" + dns: + imageRepository: public.ecr.aws/eks-distro/coredns + imageTag: v1.8.0-eks-1-19-4 + etcd: + local: + imageRepository: public.ecr.aws/eks-distro/etcd-io + imageTag: v3.4.14-eks-1-19-4 + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - name: kube-vip + image: + imagePullPolicy: IfNotPresent + args: + - manager + env: + - name: vip_arp + value: "true" + - name: address + value: "test" + - name: port + value: "6443" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: svc_enable + value: "false" + - name: lb_enable + value: "false" + securityContext: + capabilities: + add: + - NET_ADMIN + - SYS_TIME + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + resources: {} + hostNetwork: true + volumes: + - name: kubeconfig + hostPath: + type: FileOrCreate + path: /etc/kubernetes/admin.conf + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd + # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726 + #cgroup-driver: cgroupfs + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: "{{ ds.meta_data.hostname }}" + users: + - name: "mySshUsername" + lockPassword: false + sudo: ALL=(ALL) NOPASSWD:ALL + sshAuthorizedKeys: + - "mySshAuthorizedKey" + preKubeadmCommands: + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts + - echo "127.0.0.1 localhost" >>/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >> /etc/hosts + postKubeadmCommands: + - echo export KUBECONFIG=/etc/kubernetes/admin.conf >> /root/.bashrc + useExperimentalRetryJoin: true +--- 
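(The expected file above shows the user-supplied SAN, foo.bar, rendered after the defaults — localhost, 127.0.0.1, 0.0.0.0 — that the Nutanix control plane template already emits; that ordering comes from the new {{- with .apiServerCertSANs }}{{- toYaml . | nindent 10 }}{{- end }} fragment, which only renders when the slice is non-empty. The following self-contained Go sketch reproduces the mechanism; the toYaml and nindent helpers are simplified stand-ins for the template functions the real builder registers, and the sketch indents by 6 where the provider template uses 10 to match its deeper nesting, so treat it as an illustration rather than the EKS Anywhere implementation.

package main

import (
	"os"
	"strings"
	"text/template"

	"sigs.k8s.io/yaml"
)

func main() {
	// Simplified stand-ins for the sprig-style helpers the provider
	// templates rely on: toYaml marshals a value to YAML, nindent
	// prefixes every line with n spaces after a leading newline.
	funcs := template.FuncMap{
		"toYaml": func(v interface{}) (string, error) {
			b, err := yaml.Marshal(v)
			return strings.TrimSuffix(string(b), "\n"), err
		},
		"nindent": func(n int, s string) string {
			pad := "\n" + strings.Repeat(" ", n)
			return pad + strings.ReplaceAll(s, "\n", pad)
		},
	}

	// Same shape as the apiServer block in cp-template.yaml: default
	// SANs first, then any user-configured SANs appended by the fragment.
	const frag = `      certSANs:
      - localhost
      - 127.0.0.1
      - 0.0.0.0
      {{- with .apiServerCertSANs }}
      {{- toYaml . | nindent 6 }}
      {{- end }}
`

	t := template.Must(template.New("sans").Funcs(funcs).Parse(frag))
	if err := t.Execute(os.Stdout, map[string]interface{}{
		"apiServerCertSANs": []string{"foo.bar"},
	}); err != nil {
		panic(err)
	}
}

With apiServerCertSANs set to []string{"foo.bar"} this prints the three defaults followed by "- foo.bar", matching the golden file; with a nil or empty slice the with block renders nothing, so clusters that set no certSans produce the same manifest as before this change.)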
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixMachineTemplate +metadata: + name: "" + namespace: "eksa-system" +spec: + template: + spec: + providerID: "nutanix://test-m1" + vcpusPerSocket: 1 + vcpuSockets: 4 + memorySize: 8Gi + systemDiskSize: 40Gi + image: + type: name + name: "prism-image-1-19" + + cluster: + type: name + name: "prism-cluster" + subnet: + - type: name + name: "prism-subnet" +--- diff --git a/pkg/providers/nutanix/testdata/expected_cluster_api_server_cert_san_ip.yaml b/pkg/providers/nutanix/testdata/expected_cluster_api_server_cert_san_ip.yaml new file mode 100644 index 000000000000..e4ef10ed6ceb --- /dev/null +++ b/pkg/providers/nutanix/testdata/expected_cluster_api_server_cert_san_ip.yaml @@ -0,0 +1,189 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixCluster +metadata: + name: "test" + namespace: "eksa-system" +spec: + prismCentral: + address: "prism.nutanix.com" + port: 9440 + insecure: false + credentialRef: + name: "capx-test" + kind: Secret + controlPlaneEndpoint: + host: "test" + port: 6443 +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: "test" + name: "test" + namespace: "eksa-system" +spec: + clusterNetwork: + services: + cidrBlocks: [10.96.0.0/12] + pods: + cidrBlocks: [192.168.0.0/16] + serviceDomain: "cluster.local" + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: "test" + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixCluster + name: "test" +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: "test" + namespace: "eksa-system" +spec: + replicas: 1 + version: "v1.19.8-eks-1-19-4" + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixMachineTemplate + name: "" + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: "public.ecr.aws/eks-distro/kubernetes" + apiServer: + certSANs: + - localhost + - 127.0.0.1 + - 0.0.0.0 + - 11.11.11.11 + controllerManager: + extraArgs: + enable-hostpath-provisioner: "true" + dns: + imageRepository: public.ecr.aws/eks-distro/coredns + imageTag: v1.8.0-eks-1-19-4 + etcd: + local: + imageRepository: public.ecr.aws/eks-distro/etcd-io + imageTag: v3.4.14-eks-1-19-4 + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - name: kube-vip + image: + imagePullPolicy: IfNotPresent + args: + - manager + env: + - name: vip_arp + value: "true" + - name: address + value: "test" + - name: port + value: "6443" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: svc_enable + value: "false" + - name: lb_enable + value: "false" + securityContext: + capabilities: + add: + - NET_ADMIN + - SYS_TIME + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + resources: {} + hostNetwork: true + volumes: + - name: kubeconfig + hostPath: + type: FileOrCreate + path: /etc/kubernetes/admin.conf + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + # We have 
to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd + # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726 + #cgroup-driver: cgroupfs + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: "{{ ds.meta_data.hostname }}" + users: + - name: "mySshUsername" + lockPassword: false + sudo: ALL=(ALL) NOPASSWD:ALL + sshAuthorizedKeys: + - "mySshAuthorizedKey" + preKubeadmCommands: + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts + - echo "127.0.0.1 localhost" >>/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >> /etc/hosts + postKubeadmCommands: + - echo export KUBECONFIG=/etc/kubernetes/admin.conf >> /root/.bashrc + useExperimentalRetryJoin: true +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixMachineTemplate +metadata: + name: "" + namespace: "eksa-system" +spec: + template: + spec: + providerID: "nutanix://test-m1" + vcpusPerSocket: 1 + vcpuSockets: 4 + memorySize: 8Gi + systemDiskSize: 40Gi + image: + type: name + name: "prism-image-1-19" + + cluster: + type: name + name: "prism-cluster" + subnet: + - type: name + name: "prism-subnet" +--- diff --git a/pkg/providers/tinkerbell/config/template-cp.yaml b/pkg/providers/tinkerbell/config/template-cp.yaml index 2fa8a03206b4..3a94af6ade66 100644 --- a/pkg/providers/tinkerbell/config/template-cp.yaml +++ b/pkg/providers/tinkerbell/config/template-cp.yaml @@ -94,6 +94,10 @@ spec: {{- end }} {{- end}} apiServer: + {{- with .apiServerCertSANs }} + certSANs: + {{- toYaml . | nindent 8 }} + {{- end }} extraArgs: audit-policy-file: /etc/kubernetes/audit-policy.yaml audit-log-path: /var/log/kubernetes/api-audit.log @@ -410,7 +414,7 @@ spec: {{- if .upgradeRolloutStrategy }} rolloutStrategy: rollingUpdate: - maxSurge: {{.maxSurge}} + maxSurge: {{.maxSurge}} {{- end }} version: {{.kubernetesVersion}} --- diff --git a/pkg/providers/tinkerbell/template.go b/pkg/providers/tinkerbell/template.go index 1db95a791d9a..98011e5daa91 100644 --- a/pkg/providers/tinkerbell/template.go +++ b/pkg/providers/tinkerbell/template.go @@ -403,6 +403,7 @@ func buildTemplateMapCP( "clusterName": clusterSpec.Cluster.Name, "controlPlaneEndpointIp": clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Endpoint.Host, "controlPlaneReplicas": clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Count, + "apiServerCertSANs": clusterSpec.Cluster.Spec.ControlPlaneConfiguration.CertSANs, "controlPlaneSshAuthorizedKey": controlPlaneMachineSpec.Users[0].SshAuthorizedKeys[0], "controlPlaneSshUsername": controlPlaneMachineSpec.Users[0].Name, "eksaSystemNamespace": constants.EksaSystemNamespace, diff --git a/pkg/providers/tinkerbell/template_test.go b/pkg/providers/tinkerbell/template_test.go index 09bc5f22fd99..93864d24e93d 100644 --- a/pkg/providers/tinkerbell/template_test.go +++ b/pkg/providers/tinkerbell/template_test.go @@ -2,6 +2,7 @@ package tinkerbell import ( "testing" + "time" . 
"github.com/onsi/gomega" @@ -130,3 +131,36 @@ func TestGenerateTemplateBuilderForMachineConfigOsImageURL(t *testing.T) { g.Expect(err).NotTo(HaveOccurred()) g.Expect(gotEtcdMachineSpec).To(Equal(expectedEtcdMachineSpec)) } + +func TestTemplateBuilder_CertSANs(t *testing.T) { + for _, tc := range []struct { + Input string + Output string + }{ + { + Input: "testdata/cluster_tinkerbell_api_server_cert_san_domain_name.yaml", + Output: "testdata/expected_cluster_tinkerbell_api_server_cert_san_domain_name.yaml", + }, + { + Input: "testdata/cluster_tinkerbell_api_server_cert_san_ip.yaml", + Output: "testdata/expected_cluster_tinkerbell_api_server_cert_san_ip.yaml", + }, + } { + g := NewWithT(t) + clusterSpec := test.NewFullClusterSpec(t, tc.Input) + + cpMachineCfg, err := getControlPlaneMachineSpec(clusterSpec) + g.Expect(err).ToNot(HaveOccurred()) + + wngMachineCfgs, err := getWorkerNodeGroupMachineSpec(clusterSpec) + g.Expect(err).ToNot(HaveOccurred()) + + bldr := NewTemplateBuilder(&clusterSpec.TinkerbellDatacenter.Spec, cpMachineCfg, nil, wngMachineCfgs, "0.0.0.0", time.Now) + + data, err := bldr.GenerateCAPISpecControlPlane(clusterSpec) + g.Expect(err).ToNot(HaveOccurred()) + + test.AssertContentToFile(t, string(data), tc.Output) + + } +} diff --git a/pkg/providers/tinkerbell/testdata/cluster_tinkerbell_api_server_cert_san_domain_name.yaml b/pkg/providers/tinkerbell/testdata/cluster_tinkerbell_api_server_cert_san_domain_name.yaml new file mode 100644 index 000000000000..cf3e516d5261 --- /dev/null +++ b/pkg/providers/tinkerbell/testdata/cluster_tinkerbell_api_server_cert_san_domain_name.yaml @@ -0,0 +1,62 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster +metadata: + name: test + namespace: test +spec: + clusterNetwork: + cniConfig: + cilium: {} + pods: + cidrBlocks: + - 192.168.0.0/16 + services: + cidrBlocks: + - 10.96.0.0/12 + controlPlaneConfiguration: + count: 1 + endpoint: + host: 0.0.0.0 + certSANS: ["foo.bar"] + machineGroupRef: + name: test + kind: TinkerbellMachineConfig + datacenterRef: + kind: TinkerbellDatacenterConfig + name: test + kubernetesVersion: "1.21" + managementCluster: + name: test +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: TinkerbellDatacenterConfig +metadata: + name: test + namespace: test +spec: + tinkerbellIP: "5.6.7.8" + osImageURL: "https://ubuntu-1-21.gz" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: TinkerbellMachineConfig +metadata: + name: test + namespace: test +spec: + hardwareSelector: + type: "node" + osFamily: ubuntu + templateRef: + kind: TinkerbellTemplateConfig + name: test + users: + - name: tink-user + sshAuthorizedKeys: + - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: TinkerbellTemplateConfig +metadata: + name: test +spec: + template: {} diff --git 
a/pkg/providers/tinkerbell/testdata/cluster_tinkerbell_api_server_cert_san_ip.yaml b/pkg/providers/tinkerbell/testdata/cluster_tinkerbell_api_server_cert_san_ip.yaml new file mode 100644 index 000000000000..1f7bdd27f5ca --- /dev/null +++ b/pkg/providers/tinkerbell/testdata/cluster_tinkerbell_api_server_cert_san_ip.yaml @@ -0,0 +1,62 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster +metadata: + name: test + namespace: test +spec: + clusterNetwork: + cniConfig: + cilium: {} + pods: + cidrBlocks: + - 192.168.0.0/16 + services: + cidrBlocks: + - 10.96.0.0/12 + controlPlaneConfiguration: + count: 1 + endpoint: + host: 0.0.0.0 + certSANs: ["11.11.11.11"] + machineGroupRef: + name: test + kind: TinkerbellMachineConfig + datacenterRef: + kind: TinkerbellDatacenterConfig + name: test + kubernetesVersion: "1.21" + managementCluster: + name: test +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: TinkerbellDatacenterConfig +metadata: + name: test + namespace: test +spec: + tinkerbellIP: "5.6.7.8" + osImageURL: "https://ubuntu-1-21.gz" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: TinkerbellMachineConfig +metadata: + name: test + namespace: test +spec: + hardwareSelector: + type: "node" + osFamily: ubuntu + templateRef: + kind: TinkerbellTemplateConfig + name: test + users: + - name: tink-user + sshAuthorizedKeys: + - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: TinkerbellTemplateConfig +metadata: + name: test +spec: + template: {} diff --git a/pkg/providers/tinkerbell/testdata/expected_cluster_tinkerbell_api_server_cert_san_domain_name.yaml b/pkg/providers/tinkerbell/testdata/expected_cluster_tinkerbell_api_server_cert_san_domain_name.yaml new file mode 100644 index 000000000000..4cb6e6625d4b --- /dev/null +++ b/pkg/providers/tinkerbell/testdata/expected_cluster_tinkerbell_api_server_cert_san_domain_name.yaml @@ -0,0 +1,342 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test + namespace: eksa-system +spec: + clusterNetwork: + pods: + cidrBlocks: [192.168.0.0/16] + services: + cidrBlocks: [10.96.0.0/12] + controlPlaneEndpoint: + host: 0.0.0.0 + port: 6443 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: test + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: TinkerbellCluster + name: test +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: test + namespace: eksa-system +spec: + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: public.ecr.aws/eks-distro/kubernetes + etcd: + local: + imageRepository: public.ecr.aws/eks-distro/etcd-io + imageTag: v3.4.16-eks-1-21-4 + dns: + imageRepository: 
public.ecr.aws/eks-distro/coredns + imageTag: v1.8.3-eks-1-21-4 + apiServer: + certSANs: + - foo.bar + extraArgs: + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-path: /var/log/kubernetes/api-audit.log + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "512" + feature-gates: ServiceLoadBalancerClass=true + extraVolumes: + - hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + name: audit-policy + pathType: File + readOnly: true + - hostPath: /var/log/kubernetes + mountPath: /var/log/kubernetes + name: audit-log-dir + pathType: DirectoryOrCreate + readOnly: false + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + provider-id: PROVIDER_ID + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + taints: [] + joinConfiguration: + nodeRegistration: + ignorePreflightErrors: + - DirAvailable--etc-kubernetes-manifests + kubeletExtraArgs: + provider-id: PROVIDER_ID + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + taints: [] + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - args: + - manager + env: + - name: vip_arp + value: "true" + - name: port + value: "6443" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: address + value: 0.0.0.0 + # kube-vip daemon in worker node watches for LoadBalancer services. + # When there is no worker node, make kube-vip in control-plane nodes watch + - name: svc_enable + value: "true" + - name: svc_election + value: "true" + image: public.ecr.aws/l0g8r8j6/kube-vip/kube-vip:v0.3.7-eks-a-v0.0.0-dev-build.581 + imagePullPolicy: IfNotPresent + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + name: kubeconfig + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + - content: | + apiVersion: audit.k8s.io/v1beta1 + kind: Policy + rules: + # Log aws-auth configmap changes + - level: RequestResponse + namespaces: ["kube-system"] + verbs: ["update", "patch", "delete"] + resources: + - group: "" # core + resources: ["configmaps"] + resourceNames: ["aws-auth"] + omitStages: + - "RequestReceived" + # The following requests were manually identified as high-volume and low-risk, + # so drop them. 
+ - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + # Don't log HPA fetching metrics. + - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. + - level: None + resources: + - group: "" # core + resources: ["events"] + # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes + - level: Request + users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + - level: Request + userGroups: ["system:nodes"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + # deletecollection calls can be large, don't log responses for expected namespace deletions + - level: Request + users: ["system:serviceaccount:kube-system:namespace-controller"] + verbs: ["deletecollection"] + omitStages: + - "RequestReceived" + # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. + - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + - level: Request + resources: + - group: "" + resources: ["serviceaccounts/token"] + # Get repsonses can be large; skip them. 
+ - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. + - level: Metadata + omitStages: + - "RequestReceived" + owner: root:root + path: /etc/kubernetes/audit-policy.yaml + users: + - name: tink-user + sshAuthorizedKeys: + - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com' + sudo: ALL=(ALL) NOPASSWD:ALL + format: cloud-config + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: TinkerbellMachineTemplate + name: + replicas: 1 + version: v1.21.2-eks-1-21-4 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: TinkerbellMachineTemplate +metadata: + name: + namespace: eksa-system +spec: + template: + spec: + hardwareAffinity: + required: + - labelSelector: + matchLabels: + type: node + templateOverride: | + global_timeout: 0 + id: "" + name: "" + tasks: null + version: "" + +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: TinkerbellCluster +metadata: + name: test + namespace: eksa-system +spec: + imageLookupFormat: --kube-v1.21.2-eks-1-21-4.raw.gz + imageLookupBaseRegistry: / \ No newline at end of file diff --git a/pkg/providers/tinkerbell/testdata/expected_cluster_tinkerbell_api_server_cert_san_ip.yaml b/pkg/providers/tinkerbell/testdata/expected_cluster_tinkerbell_api_server_cert_san_ip.yaml new file mode 100644 index 000000000000..b497f8307d39 --- /dev/null +++ b/pkg/providers/tinkerbell/testdata/expected_cluster_tinkerbell_api_server_cert_san_ip.yaml @@ -0,0 +1,342 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test + 
namespace: eksa-system +spec: + clusterNetwork: + pods: + cidrBlocks: [192.168.0.0/16] + services: + cidrBlocks: [10.96.0.0/12] + controlPlaneEndpoint: + host: 0.0.0.0 + port: 6443 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: test + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: TinkerbellCluster + name: test +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: test + namespace: eksa-system +spec: + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: public.ecr.aws/eks-distro/kubernetes + etcd: + local: + imageRepository: public.ecr.aws/eks-distro/etcd-io + imageTag: v3.4.16-eks-1-21-4 + dns: + imageRepository: public.ecr.aws/eks-distro/coredns + imageTag: v1.8.3-eks-1-21-4 + apiServer: + certSANs: + - 11.11.11.11 + extraArgs: + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-path: /var/log/kubernetes/api-audit.log + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "512" + feature-gates: ServiceLoadBalancerClass=true + extraVolumes: + - hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + name: audit-policy + pathType: File + readOnly: true + - hostPath: /var/log/kubernetes + mountPath: /var/log/kubernetes + name: audit-log-dir + pathType: DirectoryOrCreate + readOnly: false + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + provider-id: PROVIDER_ID + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + taints: [] + joinConfiguration: + nodeRegistration: + ignorePreflightErrors: + - DirAvailable--etc-kubernetes-manifests + kubeletExtraArgs: + provider-id: PROVIDER_ID + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + taints: [] + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - args: + - manager + env: + - name: vip_arp + value: "true" + - name: port + value: "6443" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: address + value: 0.0.0.0 + # kube-vip daemon in worker node watches for LoadBalancer services. 
+ # When there is no worker node, make kube-vip in control-plane nodes watch + - name: svc_enable + value: "true" + - name: svc_election + value: "true" + image: public.ecr.aws/l0g8r8j6/kube-vip/kube-vip:v0.3.7-eks-a-v0.0.0-dev-build.581 + imagePullPolicy: IfNotPresent + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + name: kubeconfig + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + - content: | + apiVersion: audit.k8s.io/v1beta1 + kind: Policy + rules: + # Log aws-auth configmap changes + - level: RequestResponse + namespaces: ["kube-system"] + verbs: ["update", "patch", "delete"] + resources: + - group: "" # core + resources: ["configmaps"] + resourceNames: ["aws-auth"] + omitStages: + - "RequestReceived" + # The following requests were manually identified as high-volume and low-risk, + # so drop them. + - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + # Don't log HPA fetching metrics. + - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. + - level: None + resources: + - group: "" # core + resources: ["events"] + # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes + - level: Request + users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + - level: Request + userGroups: ["system:nodes"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + # deletecollection calls can be large, don't log responses for expected namespace deletions + - level: Request + users: ["system:serviceaccount:kube-system:namespace-controller"] + verbs: ["deletecollection"] + omitStages: + - "RequestReceived" + # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. 
+ - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + - level: Request + resources: + - group: "" + resources: ["serviceaccounts/token"] + # Get repsonses can be large; skip them. + - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. + - level: Metadata + omitStages: + - "RequestReceived" + owner: root:root + path: /etc/kubernetes/audit-policy.yaml + users: + - name: tink-user + sshAuthorizedKeys: + - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com' + sudo: ALL=(ALL) NOPASSWD:ALL + format: cloud-config + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: TinkerbellMachineTemplate + name: + replicas: 1 + version: v1.21.2-eks-1-21-4 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: TinkerbellMachineTemplate +metadata: + name: + namespace: eksa-system +spec: + template: + spec: + hardwareAffinity: + required: + - labelSelector: + matchLabels: + type: node + templateOverride: | + global_timeout: 0 + id: "" + name: "" + tasks: null + version: "" + +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: TinkerbellCluster +metadata: + name: test + namespace: eksa-system +spec: + imageLookupFormat: --kube-v1.21.2-eks-1-21-4.raw.gz + imageLookupBaseRegistry: / \ No newline at end of file diff --git a/pkg/providers/vsphere/config/template-cp.yaml b/pkg/providers/vsphere/config/template-cp.yaml index 513c547bd07a..34a92b22cad8 100644 --- 
a/pkg/providers/vsphere/config/template-cp.yaml +++ b/pkg/providers/vsphere/config/template-cp.yaml @@ -146,6 +146,10 @@ spec: {{- end }} {{- end}} apiServer: + {{- with .apiServerCertSANs }} + certSANs: + {{- toYaml . | nindent 8 }} + {{- end }} extraArgs: cloud-provider: external audit-policy-file: /etc/kubernetes/audit-policy.yaml diff --git a/pkg/providers/vsphere/template.go b/pkg/providers/vsphere/template.go index f8aaa86cd1d3..13682e8be242 100644 --- a/pkg/providers/vsphere/template.go +++ b/pkg/providers/vsphere/template.go @@ -151,6 +151,7 @@ func buildTemplateMapCP( "clusterName": clusterSpec.Cluster.Name, "controlPlaneEndpointIp": clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Endpoint.Host, "controlPlaneReplicas": clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Count, + "apiServerCertSANs": clusterSpec.Cluster.Spec.ControlPlaneConfiguration.CertSANs, "kubernetesRepository": versionsBundle.KubeDistro.Kubernetes.Repository, "kubernetesVersion": versionsBundle.KubeDistro.Kubernetes.Tag, "etcdRepository": versionsBundle.KubeDistro.Etcd.Repository, diff --git a/pkg/providers/vsphere/template_test.go b/pkg/providers/vsphere/template_test.go index 9415f73b0053..5443d40829a9 100644 --- a/pkg/providers/vsphere/template_test.go +++ b/pkg/providers/vsphere/template_test.go @@ -5,13 +5,9 @@ import ( "time" . "github.com/onsi/gomega" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/aws/eks-anywhere/internal/test" - v1alpha1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" - "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/providers/vsphere" - "github.com/aws/eks-anywhere/pkg/utils/ptr" ) func TestVsphereTemplateBuilderGenerateCAPISpecWorkersInvalidSSHKey(t *testing.T) { @@ -53,75 +49,32 @@ func TestVsphereTemplateBuilderGenerateCAPISpecControlPlaneInvalidEtcdSSHKey(t * ) } -func invalidSSHKey() string { - return "ssh-rsa AAAA B3NzaC1K73CeQ== testemail@test.com" -} +func TestTemplateBuilder_CertSANs(t *testing.T) { + for _, tc := range []struct { + Input string + Output string + }{ + { + Input: "testdata/cluster_api_server_cert_san_domain_name.yaml", + Output: "testdata/expected_cluster_api_server_cert_san_domain_name.yaml", + }, + { + Input: "testdata/cluster_api_server_cert_san_ip.yaml", + Output: "testdata/expected_cluster_api_server_cert_san_ip.yaml", + }, + } { + g := NewWithT(t) + clusterSpec := test.NewFullClusterSpec(t, tc.Input) + + bldr := vsphere.NewVsphereTemplateBuilder(time.Now) -func vsphereClusterSpec(opts ...test.ClusterSpecOpt) *cluster.Spec { - spec := test.NewClusterSpec(func(s *cluster.Spec) { - s.Cluster.Name = "test-cluster" - s.Cluster.Spec.ControlPlaneConfiguration = v1alpha1.ControlPlaneConfiguration{ - Count: 3, - Endpoint: &v1alpha1.Endpoint{ - Host: "test-ip", - }, - MachineGroupRef: &v1alpha1.Ref{ - Kind: v1alpha1.VSphereMachineConfigKind, - Name: "eksa-unit-test", - }, - } - s.Cluster.Spec.WorkerNodeGroupConfigurations = []v1alpha1.WorkerNodeGroupConfiguration{{ - Name: "md-0", - Count: ptr.Int(3), - MachineGroupRef: &v1alpha1.Ref{ - Kind: v1alpha1.VSphereMachineConfigKind, - Name: "eksa-unit-test", - }, - }} - s.Cluster.Spec.ClusterNetwork = v1alpha1.ClusterNetwork{ - CNIConfig: &v1alpha1.CNIConfig{Cilium: &v1alpha1.CiliumConfig{}}, - Pods: v1alpha1.Pods{ - CidrBlocks: []string{"192.168.0.0/16"}, - }, - Services: v1alpha1.Services{ - CidrBlocks: []string{"10.96.0.0/12"}, - }, - } - s.Cluster.Spec.DatacenterRef = v1alpha1.Ref{ - Kind: v1alpha1.VSphereDatacenterKind, - Name: "eksa-unit-test", - } - s.VSphereDatacenter = 
&v1alpha1.VSphereDatacenterConfig{ - Spec: v1alpha1.VSphereDatacenterConfigSpec{ - Datacenter: "test", - Network: "test", - Server: "test", - }, - } - s.Cluster.Spec.DatacenterRef = v1alpha1.Ref{ - Kind: v1alpha1.VSphereDatacenterKind, - Name: "vsphere test", - } - s.VSphereMachineConfigs = map[string]*v1alpha1.VSphereMachineConfig{ - "eksa-unit-test": { - ObjectMeta: metav1.ObjectMeta{ - Name: "eksa-unit-test", - }, - Spec: v1alpha1.VSphereMachineConfigSpec{ - Users: []v1alpha1.UserConfiguration{ - { - Name: "capv", - SshAuthorizedKeys: []string{"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ=="}, - }, - }, - }, - }, - } - }) + data, err := bldr.GenerateCAPISpecControlPlane(clusterSpec) + g.Expect(err).ToNot(HaveOccurred()) - for _, op := range opts { - op(spec) + test.AssertContentToFile(t, string(data), tc.Output) } +} - return spec +func invalidSSHKey() string { + return "ssh-rsa AAAA B3NzaC1K73CeQ== testemail@test.com" } diff --git a/pkg/providers/vsphere/testdata/cluster_api_server_cert_san_domain_name.yaml b/pkg/providers/vsphere/testdata/cluster_api_server_cert_san_domain_name.yaml new file mode 100644 index 000000000000..d37d4e6efc7d --- /dev/null +++ b/pkg/providers/vsphere/testdata/cluster_api_server_cert_san_domain_name.yaml @@ -0,0 +1,58 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster +metadata: + name: test + namespace: test +spec: + controlPlaneConfiguration: + count: 1 + endpoint: + host: 0.0.0.0 + certSANs: ["foo.bar"] + machineGroupRef: + name: test + kind: VSphereMachineConfig + kubernetesVersion: "1.19" + datacenterRef: + kind: VSphereDatacenterConfig + name: test + clusterNetwork: + cni: "cilium" + pods: + cidrBlocks: + - 192.168.0.0/16 + services: + cidrBlocks: + - 10.96.0.0/12 +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: VSphereMachineConfig +metadata: + name: test + namespace: test +spec: + diskGiB: 25 + cloneMode: linkedClone + datastore: "/SDDC-Datacenter/datastore/WorkloadDatastore" + folder: "/SDDC-Datacenter/vm" + memoryMiB: 8192 + numCPUs: 2 + osFamily: ubuntu + resourcePool: "*/Resources" + template: "/SDDC-Datacenter/vm/Templates/ubuntu-1804-kube-v1.19.6" + users: + - name: capv + sshAuthorizedKeys: + - "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: VSphereDatacenterConfig +metadata: + name: test + namespace: test +spec: + datacenter: "SDDC-Datacenter" + network: "/SDDC-Datacenter/network/sddc-cgw-network-1" + server: "vsphere_server" + thumbprint: "ABCDEFG" + insecure: false diff --git a/pkg/providers/vsphere/testdata/cluster_api_server_cert_san_ip.yaml b/pkg/providers/vsphere/testdata/cluster_api_server_cert_san_ip.yaml new file mode 100644 index 000000000000..13fd45293233 --- /dev/null +++ b/pkg/providers/vsphere/testdata/cluster_api_server_cert_san_ip.yaml @@ -0,0 +1,58 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster +metadata: + name: test + namespace: test +spec: + controlPlaneConfiguration: + count: 1 + endpoint: + host: 0.0.0.0 + certSANs: ["11.11.11.11"] + machineGroupRef: + name: test + kind: VSphereMachineConfig + kubernetesVersion: "1.19" + datacenterRef: + kind: VSphereDatacenterConfig + name: test + clusterNetwork: + cni: "cilium" + pods: + cidrBlocks: + - 192.168.0.0/16 + services: + cidrBlocks: + - 10.96.0.0/12 +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: VSphereMachineConfig +metadata: + name: test + namespace: test +spec: + diskGiB: 25 + cloneMode: linkedClone + datastore: "/SDDC-Datacenter/datastore/WorkloadDatastore" + folder: "/SDDC-Datacenter/vm" + memoryMiB: 8192 + numCPUs: 2 + osFamily: ubuntu + resourcePool: "*/Resources" + template: "/SDDC-Datacenter/vm/Templates/ubuntu-1804-kube-v1.19.6" + users: + - name: capv + sshAuthorizedKeys: + - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: VSphereDatacenterConfig +metadata: + name: test + namespace: test +spec: + datacenter: "SDDC-Datacenter" + network: "/SDDC-Datacenter/network/sddc-cgw-network-1" + server: "vsphere_server" + thumbprint: "ABCDEFG" + insecure: false diff --git a/pkg/providers/vsphere/testdata/expected_cluster_api_server_cert_san_domain_name.yaml b/pkg/providers/vsphere/testdata/expected_cluster_api_server_cert_san_domain_name.yaml new file mode 100644 index 000000000000..2bc454912b0e --- 
/dev/null +++ b/pkg/providers/vsphere/testdata/expected_cluster_api_server_cert_san_domain_name.yaml @@ -0,0 +1,633 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test + namespace: eksa-system +spec: + clusterNetwork: + pods: + cidrBlocks: [192.168.0.0/16] + services: + cidrBlocks: [10.96.0.0/12] + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: test + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereCluster + name: test +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereCluster +metadata: + name: test + namespace: eksa-system +spec: + controlPlaneEndpoint: + host: 0.0.0.0 + port: 6443 + identityRef: + kind: Secret + name: test-vsphere-credentials + server: vsphere_server + thumbprint: 'ABCDEFG' +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachineTemplate +metadata: + name: + namespace: eksa-system +spec: + template: + spec: + cloneMode: linkedClone + datacenter: 'SDDC-Datacenter' + datastore: /SDDC-Datacenter/datastore/WorkloadDatastore + diskGiB: 25 + folder: '/SDDC-Datacenter/vm' + memoryMiB: 8192 + network: + devices: + - dhcp4: true + networkName: /SDDC-Datacenter/network/sddc-cgw-network-1 + numCPUs: 2 + resourcePool: '*/Resources' + server: vsphere_server + template: /SDDC-Datacenter/vm/Templates/ubuntu-1804-kube-v1.19.6 + thumbprint: 'ABCDEFG' +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: test + namespace: eksa-system +spec: + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + name: + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: public.ecr.aws/eks-distro/kubernetes + etcd: + local: + imageRepository: public.ecr.aws/eks-distro/etcd-io + imageTag: v3.4.14-eks-1-19-4 + extraArgs: + cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + dns: + imageRepository: public.ecr.aws/eks-distro/coredns + imageTag: v1.8.0-eks-1-19-4 + apiServer: + certSANs: + - foo.bar + extraArgs: + cloud-provider: external + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-path: /var/log/kubernetes/api-audit.log + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "512" + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + extraVolumes: + - hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + name: audit-policy + pathType: File + readOnly: true + - hostPath: /var/log/kubernetes + mountPath: /var/log/kubernetes + name: audit-log-dir + pathType: DirectoryOrCreate + readOnly: false + controllerManager: + extraArgs: + cloud-provider: external + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + scheduler: + extraArgs: + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - args: + - manager + env: + - name: vip_arp + value: "true" + - name: port + value: "6443" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: 
vip_retryperiod + value: "2" + - name: address + value: 0.0.0.0 + image: public.ecr.aws/l0g8r8j6/kube-vip/kube-vip:v0.3.2-2093eaeda5a4567f0e516d652e0b25b1d7abc774 + imagePullPolicy: IfNotPresent + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + name: kubeconfig + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + - content: | + apiVersion: audit.k8s.io/v1beta1 + kind: Policy + rules: + # Log aws-auth configmap changes + - level: RequestResponse + namespaces: ["kube-system"] + verbs: ["update", "patch", "delete"] + resources: + - group: "" # core + resources: ["configmaps"] + resourceNames: ["aws-auth"] + omitStages: + - "RequestReceived" + # The following requests were manually identified as high-volume and low-risk, + # so drop them. + - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + # Don't log HPA fetching metrics. + - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. + - level: None + resources: + - group: "" # core + resources: ["events"] + # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes + - level: Request + users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + - level: Request + userGroups: ["system:nodes"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + # deletecollection calls can be large, don't log responses for expected namespace deletions + - level: Request + users: ["system:serviceaccount:kube-system:namespace-controller"] + verbs: ["deletecollection"] + omitStages: + - "RequestReceived" + # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. + - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + - level: Request + resources: + - group: "" + resources: ["serviceaccounts/token"] + # Get repsonses can be large; skip them. 
+ - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. + - level: Metadata + omitStages: + - "RequestReceived" + owner: root:root + path: /etc/kubernetes/audit-policy.yaml + initConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: '{{ ds.meta_data.hostname }}' + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: '{{ ds.meta_data.hostname }}' + preKubeadmCommands: + - hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts + - echo "127.0.0.1 localhost" >>/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >>/etc/hosts + - echo "{{ ds.meta_data.hostname }}" >/etc/hostname + useExperimentalRetryJoin: true + users: + - name: capv + sshAuthorizedKeys: + - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ==' + sudo: ALL=(ALL) NOPASSWD:ALL + format: cloud-config + replicas: 1 + version: v1.19.8-eks-1-19-4 +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test-cpi + namespace: eksa-system +spec: + strategy: Reconcile + clusterSelector: + matchLabels: + cluster.x-k8s.io/cluster-name: test + resources: + - kind: Secret + name: test-cloud-controller-manager + - kind: Secret + name: 
test-cloud-provider-vsphere-credentials + - kind: ConfigMap + name: test-cpi-manifests +--- +apiVersion: v1 +kind: Secret +metadata: + name: test-vsphere-credentials + namespace: eksa-system + labels: + clusterctl.cluster.x-k8s.io/move: "true" +stringData: + username: "" + password: "" +--- +apiVersion: v1 +kind: Secret +metadata: + name: test-cloud-controller-manager + namespace: eksa-system +stringData: + data: | + apiVersion: v1 + kind: ServiceAccount + metadata: + name: cloud-controller-manager + namespace: kube-system +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +kind: Secret +metadata: + name: test-cloud-provider-vsphere-credentials + namespace: eksa-system +stringData: + data: | + apiVersion: v1 + kind: Secret + metadata: + name: cloud-provider-vsphere-credentials + namespace: kube-system + stringData: + vsphere_server.password: "" + vsphere_server.username: "" + type: Opaque +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +data: + data: | + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: system:cloud-controller-manager + rules: + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - '*' + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - services + verbs: + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - watch + - list + - delete + - update + - create + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: system:cloud-controller-manager + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - kind: User + name: cloud-controller-manager + --- + apiVersion: v1 + data: + vsphere.conf: | + global: + secretName: cloud-provider-vsphere-credentials + secretNamespace: kube-system + thumbprint: "ABCDEFG" + insecureFlag: false + vcenter: + vsphere_server: + datacenters: + - 'SDDC-Datacenter' + secretName: cloud-provider-vsphere-credentials + secretNamespace: kube-system + server: 'vsphere_server' + thumbprint: 'ABCDEFG' + kind: ConfigMap + metadata: + name: vsphere-cloud-config + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: servicecatalog.k8s.io:apiserver-authentication-reader + namespace: kube-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - kind: User + name: cloud-controller-manager + --- + apiVersion: v1 + kind: Service + metadata: + labels: + component: cloud-controller-manager + name: cloud-controller-manager + namespace: kube-system + spec: + ports: + - port: 443 + protocol: TCP + targetPort: 43001 + selector: + component: cloud-controller-manager + type: NodePort + --- + 
apiVersion: apps/v1 + kind: DaemonSet + metadata: + labels: + k8s-app: vsphere-cloud-controller-manager + name: vsphere-cloud-controller-manager + namespace: kube-system + spec: + selector: + matchLabels: + k8s-app: vsphere-cloud-controller-manager + template: + metadata: + labels: + k8s-app: vsphere-cloud-controller-manager + spec: + containers: + - args: + - --v=2 + - --cloud-provider=vsphere + - --cloud-config=/etc/cloud/vsphere.conf + image: public.ecr.aws/l0g8r8j6/kubernetes/cloud-provider-vsphere/cpi/manager:v1.18.1-2093eaeda5a4567f0e516d652e0b25b1d7abc774 + name: vsphere-cloud-controller-manager + resources: + requests: + cpu: 200m + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + hostNetwork: true + serviceAccountName: cloud-controller-manager + tolerations: + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + - effect: NoSchedule + key: node-role.kubernetes.io/master + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + - effect: NoSchedule + key: node.kubernetes.io/not-ready + volumes: + - configMap: + name: vsphere-cloud-config + name: vsphere-config-volume + updateStrategy: + type: RollingUpdate +kind: ConfigMap +metadata: + name: test-cpi-manifests + namespace: eksa-system diff --git a/pkg/providers/vsphere/testdata/expected_cluster_api_server_cert_san_ip.yaml b/pkg/providers/vsphere/testdata/expected_cluster_api_server_cert_san_ip.yaml new file mode 100644 index 000000000000..58004602c476 --- /dev/null +++ b/pkg/providers/vsphere/testdata/expected_cluster_api_server_cert_san_ip.yaml @@ -0,0 +1,633 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test + namespace: eksa-system +spec: + clusterNetwork: + pods: + cidrBlocks: [192.168.0.0/16] + services: + cidrBlocks: [10.96.0.0/12] + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: test + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereCluster + name: test +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereCluster +metadata: + name: test + namespace: eksa-system +spec: + controlPlaneEndpoint: + host: 0.0.0.0 + port: 6443 + identityRef: + kind: Secret + name: test-vsphere-credentials + server: vsphere_server + thumbprint: 'ABCDEFG' +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachineTemplate +metadata: + name: + namespace: eksa-system +spec: + template: + spec: + cloneMode: linkedClone + datacenter: 'SDDC-Datacenter' + datastore: /SDDC-Datacenter/datastore/WorkloadDatastore + diskGiB: 25 + folder: '/SDDC-Datacenter/vm' + memoryMiB: 8192 + network: + devices: + - dhcp4: true + networkName: /SDDC-Datacenter/network/sddc-cgw-network-1 + numCPUs: 2 + resourcePool: '*/Resources' + server: vsphere_server + template: /SDDC-Datacenter/vm/Templates/ubuntu-1804-kube-v1.19.6 + thumbprint: 'ABCDEFG' +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: test + namespace: eksa-system +spec: + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + name: + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: public.ecr.aws/eks-distro/kubernetes + etcd: + local: + imageRepository: public.ecr.aws/eks-distro/etcd-io + imageTag: v3.4.14-eks-1-19-4 + extraArgs: + cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 
+ dns: + imageRepository: public.ecr.aws/eks-distro/coredns + imageTag: v1.8.0-eks-1-19-4 + apiServer: + certSANs: + - 11.11.11.11 + extraArgs: + cloud-provider: external + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-path: /var/log/kubernetes/api-audit.log + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "512" + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + extraVolumes: + - hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + name: audit-policy + pathType: File + readOnly: true + - hostPath: /var/log/kubernetes + mountPath: /var/log/kubernetes + name: audit-log-dir + pathType: DirectoryOrCreate + readOnly: false + controllerManager: + extraArgs: + cloud-provider: external + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + scheduler: + extraArgs: + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - args: + - manager + env: + - name: vip_arp + value: "true" + - name: port + value: "6443" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: address + value: 0.0.0.0 + image: public.ecr.aws/l0g8r8j6/kube-vip/kube-vip:v0.3.2-2093eaeda5a4567f0e516d652e0b25b1d7abc774 + imagePullPolicy: IfNotPresent + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + name: kubeconfig + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + - content: | + apiVersion: audit.k8s.io/v1beta1 + kind: Policy + rules: + # Log aws-auth configmap changes + - level: RequestResponse + namespaces: ["kube-system"] + verbs: ["update", "patch", "delete"] + resources: + - group: "" # core + resources: ["configmaps"] + resourceNames: ["aws-auth"] + omitStages: + - "RequestReceived" + # The following requests were manually identified as high-volume and low-risk, + # so drop them. + - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + # Don't log HPA fetching metrics. 
+ - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. + - level: None + resources: + - group: "" # core + resources: ["events"] + # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes + - level: Request + users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + - level: Request + userGroups: ["system:nodes"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + # deletecollection calls can be large, don't log responses for expected namespace deletions + - level: Request + users: ["system:serviceaccount:kube-system:namespace-controller"] + verbs: ["deletecollection"] + omitStages: + - "RequestReceived" + # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. + - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + - level: Request + resources: + - group: "" + resources: ["serviceaccounts/token"] + # Get responses can be large; skip them. + - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests.
+ - level: Metadata + omitStages: + - "RequestReceived" + owner: root:root + path: /etc/kubernetes/audit-policy.yaml + initConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: '{{ ds.meta_data.hostname }}' + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: '{{ ds.meta_data.hostname }}' + preKubeadmCommands: + - hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts + - echo "127.0.0.1 localhost" >>/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >>/etc/hosts + - echo "{{ ds.meta_data.hostname }}" >/etc/hostname + useExperimentalRetryJoin: true + users: + - name: capv + sshAuthorizedKeys: + - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ==' + sudo: ALL=(ALL) NOPASSWD:ALL + format: cloud-config + replicas: 1 + version: v1.19.8-eks-1-19-4 +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test-cpi + namespace: eksa-system +spec: + strategy: Reconcile + clusterSelector: + matchLabels: + cluster.x-k8s.io/cluster-name: test + resources: + - kind: Secret + name: test-cloud-controller-manager + - kind: Secret + name: test-cloud-provider-vsphere-credentials + - kind: ConfigMap + name: test-cpi-manifests +--- +apiVersion: v1 +kind: Secret +metadata: + name: test-vsphere-credentials + namespace: eksa-system + labels: + clusterctl.cluster.x-k8s.io/move: "true" +stringData: + username: "" + password: "" +--- +apiVersion: v1 +kind: Secret +metadata: + name: test-cloud-controller-manager + namespace: eksa-system +stringData: + data: | + apiVersion: v1 + kind: ServiceAccount + metadata: + name: cloud-controller-manager + namespace: kube-system +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +kind: Secret +metadata: + name: test-cloud-provider-vsphere-credentials + namespace: eksa-system +stringData: + data: | + apiVersion: v1 + kind: Secret + metadata: + name: cloud-provider-vsphere-credentials + namespace: kube-system + stringData: + vsphere_server.password: "" + vsphere_server.username: "" + type: Opaque +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +data: + data: | + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: system:cloud-controller-manager + rules: + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - '*' + - apiGroups: + - "" + resources: + 
- nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - services + verbs: + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - watch + - list + - delete + - update + - create + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: system:cloud-controller-manager + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - kind: User + name: cloud-controller-manager + --- + apiVersion: v1 + data: + vsphere.conf: | + global: + secretName: cloud-provider-vsphere-credentials + secretNamespace: kube-system + thumbprint: "ABCDEFG" + insecureFlag: false + vcenter: + vsphere_server: + datacenters: + - 'SDDC-Datacenter' + secretName: cloud-provider-vsphere-credentials + secretNamespace: kube-system + server: 'vsphere_server' + thumbprint: 'ABCDEFG' + kind: ConfigMap + metadata: + name: vsphere-cloud-config + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: servicecatalog.k8s.io:apiserver-authentication-reader + namespace: kube-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - kind: User + name: cloud-controller-manager + --- + apiVersion: v1 + kind: Service + metadata: + labels: + component: cloud-controller-manager + name: cloud-controller-manager + namespace: kube-system + spec: + ports: + - port: 443 + protocol: TCP + targetPort: 43001 + selector: + component: cloud-controller-manager + type: NodePort + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + labels: + k8s-app: vsphere-cloud-controller-manager + name: vsphere-cloud-controller-manager + namespace: kube-system + spec: + selector: + matchLabels: + k8s-app: vsphere-cloud-controller-manager + template: + metadata: + labels: + k8s-app: vsphere-cloud-controller-manager + spec: + containers: + - args: + - --v=2 + - --cloud-provider=vsphere + - --cloud-config=/etc/cloud/vsphere.conf + image: public.ecr.aws/l0g8r8j6/kubernetes/cloud-provider-vsphere/cpi/manager:v1.18.1-2093eaeda5a4567f0e516d652e0b25b1d7abc774 + name: vsphere-cloud-controller-manager + resources: + requests: + cpu: 200m + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + hostNetwork: true + serviceAccountName: cloud-controller-manager + tolerations: + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + - effect: NoSchedule + key: node-role.kubernetes.io/master + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + - effect: NoSchedule + key: node.kubernetes.io/not-ready + volumes: + - configMap: + name: vsphere-cloud-config + name: vsphere-config-volume + updateStrategy: + type: RollingUpdate +kind: ConfigMap +metadata: + name: test-cpi-manifests + namespace: eksa-system diff --git 
a/pkg/templater/templater.go b/pkg/templater/templater.go index afcd82e6f676..a670947ce7cd 100644 --- a/pkg/templater/templater.go +++ b/pkg/templater/templater.go @@ -6,6 +6,9 @@ import ( "strings" "text/template" + "github.com/Masterminds/sprig" + "sigs.k8s.io/yaml" + "github.com/aws/eks-anywhere/pkg/filewriter" ) @@ -42,25 +45,34 @@ func (t *Templater) WriteBytesToFile(content []byte, fileName string, f ...filew } func Execute(templateContent string, data interface{}) ([]byte, error) { - temp := template.New("tmpl") - funcMap := map[string]interface{}{ - "indent": func(spaces int, v string) string { - pad := strings.Repeat(" ", spaces) - return pad + strings.Replace(v, "\n", "\n"+pad, -1) - }, + // Apply sprig functions for easy templating. + // See https://masterminds.github.io/sprig/ for a list of available functions. + fns := sprig.TxtFuncMap() + for k, v := range map[string]any{ "stringsJoin": strings.Join, + "toYaml": toYAML, + } { + fns[k] = v } - temp = temp.Funcs(funcMap) - temp, err := temp.Parse(templateContent) + tpl := template.New("").Funcs(fns) + tpl, err := tpl.Parse(templateContent) if err != nil { return nil, fmt.Errorf("parsing template: %v", err) } var buf bytes.Buffer - err = temp.Execute(&buf, data) - if err != nil { + if err := tpl.Execute(&buf, data); err != nil { return nil, fmt.Errorf("substituting values for template: %v", err) } + return buf.Bytes(), nil } + +func toYAML(v any) string { + data, err := yaml.Marshal(v) + if err != nil { + return "" + } + return strings.TrimSuffix(string(data), "\n") +}
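
Reviewer note: a minimal sketch of how the extended templater can be exercised end to end, assuming only the Execute signature and the toYaml helper shown in the hunk above. The template literal, data values, and main package below are illustrative, not taken from the repo. toYaml marshals a Go value to YAML, and indent now resolves to sprig's implementation (which pads every line, including the first), which is why the hand-rolled indent func could be deleted.

package main

import (
	"fmt"

	"github.com/aws/eks-anywhere/pkg/templater"
)

func main() {
	// The certSANs placeholder sits at column zero because sprig's indent
	// prepends the padding to every line of the marshaled YAML list.
	tmpl := `apiServer:
  certSANs:
{{ toYaml .CertSANs | indent 4 }}`

	out, err := templater.Execute(tmpl, map[string]interface{}{
		"CertSANs": []string{"11.11.11.11", "domain.com"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// Output:
	// apiServer:
	//   certSANs:
	//     - 11.11.11.11
	//     - domain.com
}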