diff --git a/pkg/providers/cloudstack/config/template-cp.yaml b/pkg/providers/cloudstack/config/template-cp.yaml
index a55be33d4c07b..d9c2e6e827f1a 100644
--- a/pkg/providers/cloudstack/config/template-cp.yaml
+++ b/pkg/providers/cloudstack/config/template-cp.yaml
@@ -87,6 +87,10 @@ spec:
         imageRepository: {{.corednsRepository}}
         imageTag: {{.corednsVersion}}
       apiServer:
+        {{- with .apiServerCertSANs }}
+        certSANs:
+        {{- toYaml . | nindent 8 }}
+        {{- end }}
         extraArgs:
           cloud-provider: external
           audit-policy-file: /etc/kubernetes/audit-policy.yaml
diff --git a/pkg/providers/cloudstack/template.go b/pkg/providers/cloudstack/template.go
index 78945e855ce82..68b9fad224040 100644
--- a/pkg/providers/cloudstack/template.go
+++ b/pkg/providers/cloudstack/template.go
@@ -143,6 +143,7 @@ func buildTemplateMapCP(clusterSpec *cluster.Spec) (map[string]interface{}, erro
 		"controlPlaneEndpointHost":   host,
 		"controlPlaneEndpointPort":   port,
 		"controlPlaneReplicas":       clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Count,
+		"apiServerCertSANs":          clusterSpec.Cluster.Spec.ControlPlaneConfiguration.CertSANs,
 		"kubernetesRepository":       versionsBundle.KubeDistro.Kubernetes.Repository,
 		"kubernetesVersion":          versionsBundle.KubeDistro.Kubernetes.Tag,
 		"etcdRepository":             versionsBundle.KubeDistro.Etcd.Repository,
diff --git a/pkg/providers/cloudstack/template_test.go b/pkg/providers/cloudstack/template_test.go
index 3572a2d154d4d..76c4043d37157 100644
--- a/pkg/providers/cloudstack/template_test.go
+++ b/pkg/providers/cloudstack/template_test.go
@@ -141,3 +141,31 @@ func TestTemplateBuilderGenerateCAPISpecWorkersInvalidEndpoint(t *testing.T) {
 	_, err := templateBuilder.GenerateCAPISpecWorkers(clusterSpec, machineTemplateNames, kubeadmConfigTemplateNames)
 	g.Expect(err).To(MatchError(ContainSubstring("building template map for MD host 1.1.1.1:: is invalid: address 1.1.1.1::: too many colons in address")))
 }
+
+func TestTemplateBuilder_CertSANs(t *testing.T) {
+	for _, tc := range []struct {
+		Input  string
+		Output string
+	}{
+		{
+			Input:  "testdata/cluster_api_server_cert_san_domain_name.yaml",
+			Output: "testdata/expected_cluster_api_server_cert_san_domain_name.yaml",
+		},
+		{
+			Input:  "testdata/cluster_api_server_cert_san_ip.yaml",
+			Output: "testdata/expected_cluster_api_server_cert_san_ip.yaml",
+		},
+	} {
+		g := NewWithT(t)
+		clusterSpec := test.NewFullClusterSpec(t, tc.Input)
+
+		bldr := cloudstack.NewTemplateBuilder(time.Now)
+
+		data, err := bldr.GenerateCAPISpecControlPlane(clusterSpec, func(values map[string]interface{}) {
+			values["controlPlaneTemplateName"] = clusterapi.ControlPlaneMachineTemplateName(clusterSpec.Cluster)
+		})
+		g.Expect(err).ToNot(HaveOccurred())
+
+		test.AssertContentToFile(t, string(data), tc.Output)
+	}
+}
diff --git a/pkg/providers/cloudstack/testdata/cluster_api_server_cert_san_domain_name.yaml b/pkg/providers/cloudstack/testdata/cluster_api_server_cert_san_domain_name.yaml
new file mode 100644
index 0000000000000..2f89aad24aace
--- /dev/null
+++ b/pkg/providers/cloudstack/testdata/cluster_api_server_cert_san_domain_name.yaml
@@ -0,0 +1,69 @@
+apiVersion: anywhere.eks.amazonaws.com/v1alpha1
+kind: Cluster
+metadata:
+  name: test
+  namespace: test
+spec:
+  clusterNetwork:
+    cniConfig:
+      cilium: {}
+    pods:
+      cidrBlocks:
+        - 192.168.0.0/16
+    services:
+      cidrBlocks:
+        - 10.96.0.0/12
+  controlPlaneConfiguration:
+    count: 1
+    endpoint:
+      host: 0.0.0.0
+    certSANs: ["foo.bar"]
+    machineGroupRef:
+      kind: CloudStackMachineConfig
+      name: test
+  datacenterRef:
+    kind: CloudStackDatacenterConfig
+    name: 
test + kubernetesVersion: "1.21" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: CloudStackDatacenterConfig +metadata: + name: test + namespace: test +spec: + availabilityZones: + - account: "admin" + domain: "domain1" + name: "default-az-0" + credentialsRef: "global" + zone: + name: "zone1" + network: + name: "net1" + managementApiEndpoint: "http://127.16.0.1:8080/client/api" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: CloudStackMachineConfig +metadata: + name: test + namespace: test +spec: + computeOffering: + name: "m4-large" + users: + - name: "mySshUsername" + sshAuthorizedKeys: # The key below was manually generated and not used in any production systems + - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com" + template: + name: "kubernetes_1_21" + diskOffering: + name: "Small" + mountPath: "/data-small" + device: "/dev/vdb" + filesystem: "ext4" + label: "data_disk" + symlinks: + /var/log/kubernetes: /data-small/var/log/kubernetes + affinityGroupIds: + - control-plane-anti-affinity \ No newline at end of file diff --git a/pkg/providers/cloudstack/testdata/cluster_api_server_cert_san_ip.yaml b/pkg/providers/cloudstack/testdata/cluster_api_server_cert_san_ip.yaml new file mode 100644 index 0000000000000..1e6c951d30a36 --- /dev/null +++ b/pkg/providers/cloudstack/testdata/cluster_api_server_cert_san_ip.yaml @@ -0,0 +1,70 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster +metadata: + name: test + namespace: test +spec: + clusterNetwork: + cniConfig: + cilium: {} + pods: + cidrBlocks: + - 192.168.0.0/16 + services: + cidrBlocks: + - 10.96.0.0/12 + controlPlaneConfiguration: + count: 1 + endpoint: + host: 0.0.0.0 + certSANs: ["11.11.11.11"] + machineGroupRef: + kind: CloudStackMachineConfig + name: test + datacenterRef: + kind: CloudStackDatacenterConfig + name: test + kubernetesVersion: "1.21" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: CloudStackDatacenterConfig +metadata: + name: test + namespace: test +spec: + availabilityZones: + - account: "admin" + domain: "domain1" + name: "default-az-0" + credentialsRef: "global" + zone: + name: "zone1" + network: + name: "net1" + managementApiEndpoint: "http://127.16.0.1:8080/client/api" + +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: CloudStackMachineConfig +metadata: + name: test + namespace: test +spec: + computeOffering: + name: "m4-large" + users: + - name: "mySshUsername" + sshAuthorizedKeys: # The key below was manually generated and not used in any production systems + - "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com" + template: + name: "kubernetes_1_21" + diskOffering: + name: "Small" + mountPath: "/data-small" + device: "/dev/vdb" + filesystem: "ext4" + label: "data_disk" + symlinks: + /var/log/kubernetes: /data-small/var/log/kubernetes + affinityGroupIds: + - control-plane-anti-affinity \ No newline at end of file diff --git a/pkg/providers/cloudstack/testdata/expected_cluster_api_server_cert_san_domain_name.yaml b/pkg/providers/cloudstack/testdata/expected_cluster_api_server_cert_san_domain_name.yaml new file mode 100644 index 0000000000000..77cd30fe30f56 --- /dev/null +++ b/pkg/providers/cloudstack/testdata/expected_cluster_api_server_cert_san_domain_name.yaml @@ -0,0 +1,406 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test + namespace: eksa-system +spec: + clusterNetwork: + pods: + cidrBlocks: [192.168.0.0/16] + services: + cidrBlocks: [10.96.0.0/12] + controlPlaneEndpoint: + host: 0.0.0.0 + port: 6443 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: test + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta3 + kind: CloudStackCluster + name: test +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta3 +kind: CloudStackCluster +metadata: + name: test + namespace: eksa-system +spec: + controlPlaneEndpoint: + host: 0.0.0.0 + port: 6443 + failureDomains: + - name: default-az-0 + zone: + id: + name: zone1 + network: + id: + name: net1 + domain: domain1 + account: admin + acsEndpoint: + name: global + namespace: eksa-system +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: test + namespace: eksa-system +spec: + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta3 + kind: CloudStackMachineTemplate + name: test-control-plane-1 + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: public.ecr.aws/eks-distro/kubernetes + etcd: + local: + imageRepository: public.ecr.aws/eks-distro/etcd-io + imageTag: v3.4.16-eks-1-21-4 + extraArgs: + cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + dns: + imageRepository: public.ecr.aws/eks-distro/coredns + imageTag: v1.8.3-eks-1-21-4 + apiServer: + certSANs: + - foo.bar + extraArgs: + cloud-provider: external + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-path: /var/log/kubernetes/api-audit.log + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "512" + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + extraVolumes: + - hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + name: audit-policy + pathType: File + readOnly: true + - hostPath: /var/log/kubernetes + mountPath: 
/var/log/kubernetes + name: audit-log-dir + pathType: DirectoryOrCreate + readOnly: false + controllerManager: + extraArgs: + cloud-provider: external + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + scheduler: + extraArgs: + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - args: + - manager + env: + - name: vip_arp + value: "true" + - name: port + value: "6443" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: address + value: 0.0.0.0 + image: public.ecr.aws/l0g8r8j6/kube-vip/kube-vip:v0.3.7-eks-a-v0.0.0-dev-build.158 + imagePullPolicy: IfNotPresent + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + name: kubeconfig + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + - content: | + apiVersion: audit.k8s.io/v1beta1 + kind: Policy + rules: + # Log aws-auth configmap changes + - level: RequestResponse + namespaces: ["kube-system"] + verbs: ["update", "patch", "delete"] + resources: + - group: "" # core + resources: ["configmaps"] + resourceNames: ["aws-auth"] + omitStages: + - "RequestReceived" + # The following requests were manually identified as high-volume and low-risk, + # so drop them. + - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + # Don't log HPA fetching metrics. + - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. 
+ - level: None + resources: + - group: "" # core + resources: ["events"] + # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes + - level: Request + users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + - level: Request + userGroups: ["system:nodes"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + # deletecollection calls can be large, don't log responses for expected namespace deletions + - level: Request + users: ["system:serviceaccount:kube-system:namespace-controller"] + verbs: ["deletecollection"] + omitStages: + - "RequestReceived" + # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. + - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + - level: Request + resources: + - group: "" + resources: ["serviceaccounts/token"] + # Get repsonses can be large; skip them. + - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. 
+ - level: Metadata + omitStages: + - "RequestReceived" + owner: root:root + path: /etc/kubernetes/audit-policy.yaml + initConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + provider-id: cloudstack:///'{{ ds.meta_data.instance_id }}' + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: "{{ ds.meta_data.hostname }}" + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + provider-id: cloudstack:///'{{ ds.meta_data.instance_id }}' + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: "{{ ds.meta_data.hostname }}" + preKubeadmCommands: + - swapoff -a + - hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts + - echo "127.0.0.1 localhost" >>/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >>/etc/hosts + - echo "{{ ds.meta_data.hostname }}" >/etc/hostname + - >- + if [ ! -L /var/log/kubernetes ] ; + then + mv /var/log/kubernetes /var/log/kubernetes-$(tr -dc A-Za-z0-9 < /dev/urandom | head -c 10) ; + mkdir -p /data-small/var/log/kubernetes && ln -s /data-small/var/log/kubernetes /var/log/kubernetes ; + else echo "/var/log/kubernetes already symlnk"; + fi + diskSetup: + filesystems: + - device: /dev/vdb1 + overwrite: false + extraOpts: + - -E + - lazy_itable_init=1,lazy_journal_init=1 + filesystem: ext4 + label: data_disk + partitions: + - device: /dev/vdb + layout: true + overwrite: false + tableType: gpt + mounts: + - - LABEL=data_disk + - /data-small + useExperimentalRetryJoin: true + users: + - name: mySshUsername + sshAuthorizedKeys: + - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ==' + sudo: ALL=(ALL) NOPASSWD:ALL + format: cloud-config + replicas: 1 + version: v1.21.2-eks-1-21-4 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta3 +kind: CloudStackMachineTemplate +metadata: + annotations: + device.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: /dev/vdb + filesystem.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: ext4 + label.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: data_disk + mountpath.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: /data-small + symlinks.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: /var/log/kubernetes:/data-small/var/log/kubernetes + creationTimestamp: null + name: test-control-plane-1 + namespace: eksa-system +spec: + template: + spec: + affinityGroupIDs: + - control-plane-anti-affinity + diskOffering: + customSizeInGB: 0 + device: /dev/vdb + filesystem: ext4 + label: data_disk + mountPath: /data-small + name: Small + offering: + name: m4-large + sshKey: "" + template: + name: kubernetes_1_21 + +--- diff --git 
a/pkg/providers/cloudstack/testdata/expected_cluster_api_server_cert_san_ip.yaml b/pkg/providers/cloudstack/testdata/expected_cluster_api_server_cert_san_ip.yaml new file mode 100644 index 0000000000000..12cf386ce5a63 --- /dev/null +++ b/pkg/providers/cloudstack/testdata/expected_cluster_api_server_cert_san_ip.yaml @@ -0,0 +1,406 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test + namespace: eksa-system +spec: + clusterNetwork: + pods: + cidrBlocks: [192.168.0.0/16] + services: + cidrBlocks: [10.96.0.0/12] + controlPlaneEndpoint: + host: 0.0.0.0 + port: 6443 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: test + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta3 + kind: CloudStackCluster + name: test +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta3 +kind: CloudStackCluster +metadata: + name: test + namespace: eksa-system +spec: + controlPlaneEndpoint: + host: 0.0.0.0 + port: 6443 + failureDomains: + - name: default-az-0 + zone: + id: + name: zone1 + network: + id: + name: net1 + domain: domain1 + account: admin + acsEndpoint: + name: global + namespace: eksa-system +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: test + namespace: eksa-system +spec: + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta3 + kind: CloudStackMachineTemplate + name: test-control-plane-1 + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: public.ecr.aws/eks-distro/kubernetes + etcd: + local: + imageRepository: public.ecr.aws/eks-distro/etcd-io + imageTag: v3.4.16-eks-1-21-4 + extraArgs: + cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + dns: + imageRepository: public.ecr.aws/eks-distro/coredns + imageTag: v1.8.3-eks-1-21-4 + apiServer: + certSANs: + - 11.11.11.11 + extraArgs: + cloud-provider: external + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-path: /var/log/kubernetes/api-audit.log + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "512" + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + extraVolumes: + - hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + name: audit-policy + pathType: File + readOnly: true + - hostPath: /var/log/kubernetes + mountPath: /var/log/kubernetes + name: audit-log-dir + pathType: DirectoryOrCreate + readOnly: false + controllerManager: + extraArgs: + cloud-provider: external + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + scheduler: + extraArgs: + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - args: + - manager + env: + - name: vip_arp + value: "true" + - name: port + value: "6443" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: address + value: 0.0.0.0 + image: public.ecr.aws/l0g8r8j6/kube-vip/kube-vip:v0.3.7-eks-a-v0.0.0-dev-build.158 + imagePullPolicy: IfNotPresent + name: kube-vip + resources: {} 
+ securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + name: kubeconfig + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + - content: | + apiVersion: audit.k8s.io/v1beta1 + kind: Policy + rules: + # Log aws-auth configmap changes + - level: RequestResponse + namespaces: ["kube-system"] + verbs: ["update", "patch", "delete"] + resources: + - group: "" # core + resources: ["configmaps"] + resourceNames: ["aws-auth"] + omitStages: + - "RequestReceived" + # The following requests were manually identified as high-volume and low-risk, + # so drop them. + - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + # Don't log HPA fetching metrics. + - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. + - level: None + resources: + - group: "" # core + resources: ["events"] + # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes + - level: Request + users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + - level: Request + userGroups: ["system:nodes"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + # deletecollection calls can be large, don't log responses for expected namespace deletions + - level: Request + users: ["system:serviceaccount:kube-system:namespace-controller"] + verbs: ["deletecollection"] + omitStages: + - "RequestReceived" + # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. + - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + - level: Request + resources: + - group: "" + resources: ["serviceaccounts/token"] + # Get repsonses can be large; skip them. 
+ - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. + - level: Metadata + omitStages: + - "RequestReceived" + owner: root:root + path: /etc/kubernetes/audit-policy.yaml + initConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + provider-id: cloudstack:///'{{ ds.meta_data.instance_id }}' + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: "{{ ds.meta_data.hostname }}" + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + provider-id: cloudstack:///'{{ ds.meta_data.instance_id }}' + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: "{{ ds.meta_data.hostname }}" + preKubeadmCommands: + - swapoff -a + - hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts + - echo "127.0.0.1 localhost" >>/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >>/etc/hosts + - echo "{{ ds.meta_data.hostname }}" >/etc/hostname + - >- + if [ ! 
-L /var/log/kubernetes ] ; + then + mv /var/log/kubernetes /var/log/kubernetes-$(tr -dc A-Za-z0-9 < /dev/urandom | head -c 10) ; + mkdir -p /data-small/var/log/kubernetes && ln -s /data-small/var/log/kubernetes /var/log/kubernetes ; + else echo "/var/log/kubernetes already symlnk"; + fi + diskSetup: + filesystems: + - device: /dev/vdb1 + overwrite: false + extraOpts: + - -E + - lazy_itable_init=1,lazy_journal_init=1 + filesystem: ext4 + label: data_disk + partitions: + - device: /dev/vdb + layout: true + overwrite: false + tableType: gpt + mounts: + - - LABEL=data_disk + - /data-small + useExperimentalRetryJoin: true + users: + - name: mySshUsername + sshAuthorizedKeys: + - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ==' + sudo: ALL=(ALL) NOPASSWD:ALL + format: cloud-config + replicas: 1 + version: v1.21.2-eks-1-21-4 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta3 +kind: CloudStackMachineTemplate +metadata: + annotations: + device.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: /dev/vdb + filesystem.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: ext4 + label.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: data_disk + mountpath.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: /data-small + symlinks.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: /var/log/kubernetes:/data-small/var/log/kubernetes + creationTimestamp: null + name: test-control-plane-1 + namespace: eksa-system +spec: + template: + spec: + affinityGroupIDs: + - control-plane-anti-affinity + diskOffering: + customSizeInGB: 0 + device: /dev/vdb + filesystem: ext4 + label: data_disk + mountPath: /data-small + name: Small + offering: + name: m4-large + sshKey: "" + template: + name: kubernetes_1_21 + +---
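
Reviewer note: below is a minimal, self-contained sketch of how the new apiServerCertSANs block in template-cp.yaml renders. The toYaml and nindent helpers are simplified stand-ins for the Sprig functions the real EKS Anywhere templater registers, and the package layout and variable names are illustrative only, not part of this change.

package main

import (
	"os"
	"strings"
	"text/template"
)

// toYaml and nindent are simplified stand-ins for the Sprig helpers the real
// templater provides; toYaml only handles the []string shape that
// apiServerCertSANs actually has.
func toYaml(v interface{}) string {
	items, _ := v.([]string)
	lines := make([]string, 0, len(items))
	for _, item := range items {
		lines = append(lines, "- "+item)
	}
	return strings.Join(lines, "\n")
}

func nindent(spaces int, s string) string {
	pad := strings.Repeat(" ", spaces)
	return "\n" + pad + strings.ReplaceAll(s, "\n", "\n"+pad)
}

// snippet is the fragment this change adds to template-cp.yaml.
const snippet = `      apiServer:
        {{- with .apiServerCertSANs }}
        certSANs:
        {{- toYaml . | nindent 8 }}
        {{- end }}
        extraArgs:
          cloud-provider: external`

func main() {
	tpl := template.Must(template.New("cp").Funcs(template.FuncMap{
		"toYaml":  toYaml,
		"nindent": nindent,
	}).Parse(snippet))

	// Prints the certSANs list under apiServer, matching the structure of the
	// expected_cluster_api_server_cert_san_*.yaml fixtures.
	if err := tpl.Execute(os.Stdout, map[string]interface{}{
		"apiServerCertSANs": []string{"foo.bar", "11.11.11.11"},
	}); err != nil {
		panic(err)
	}
}

Because the whole block is wrapped in a "with" action, clusters that leave controlPlaneConfiguration.certSANs unset should keep rendering exactly the same KubeadmControlPlane spec as before, which is why this change only adds new fixtures instead of touching the existing ones.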