diff --git a/.github/actions/k3d/action.yaml b/.github/actions/k3d/action.yaml new file mode 100644 index 00000000..0d6fd283 --- /dev/null +++ b/.github/actions/k3d/action.yaml @@ -0,0 +1,8 @@ +name: setup-k3d +description: "Install k3d and create a cluster" + +runs: + using: composite + steps: + - run: "curl -s https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | bash" + shell: bash diff --git a/.github/kubectl-patch/nodeselector-pod-toleration.yaml b/.github/kubectl-patch/nodeselector-pod-toleration.yaml new file mode 100644 index 00000000..f9b04fef --- /dev/null +++ b/.github/kubectl-patch/nodeselector-pod-toleration.yaml @@ -0,0 +1,8 @@ +spec: + tolerations: + - key: "testkey" + operator: "Equal" + value: "testvalue" + effect: "NoSchedule" + nodeSelector: + testkey: "testvalue" diff --git a/.github/kubectl-patch/nodeselector-toleration.yaml b/.github/kubectl-patch/nodeselector-toleration.yaml new file mode 100644 index 00000000..51bb76d7 --- /dev/null +++ b/.github/kubectl-patch/nodeselector-toleration.yaml @@ -0,0 +1,10 @@ +spec: + template: + spec: + nodeSelector: + testkey: "testvalue" + tolerations: + - key: "testkey" + operator: "Equal" + value: "testvalue" + effect: "NoSchedule" diff --git a/.github/workflows/e2e-with-cluster.yaml b/.github/workflows/e2e-with-cluster.yaml index 3bc18f4e..1b867c6f 100644 --- a/.github/workflows/e2e-with-cluster.yaml +++ b/.github/workflows/e2e-with-cluster.yaml @@ -11,22 +11,44 @@ permissions: id-token: write jobs: - minikube: - name: Run E2E tests with Minikube + minikube-default: + name: Run e2e tests against Minikube if: ${{ github.event_name == 'pull_request' && github.event.action != 'closed' }} runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@v3 - - name: Setup Minikube - run: minikube start --driver=docker + - name: Setup Flux CLI + uses: fluxcd/flux2/action@main - - name: Install FluxCD + - name: Setup k3d + uses: ./.github/actions/k3d + + - name: Run e2e tests against current cluster run: | - curl -s https://fluxcd.io/install.sh | sudo bash - flux install --namespace=flux-system --components="source-controller,helm-controller" - - - name: Run e2e tests + make test-e2e-with-cluster-local + + - name: Upload coverage reports to Codecov + uses: codecov/codecov-action@v3 + + minikube-tainted: + name: Run e2e tests against Minikube (TAINTED) + if: ${{ github.event_name == 'pull_request' && github.event.action != 'closed' }} + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Setup Flux CLI + uses: fluxcd/flux2/action@main + + - name: Setup k3d + uses: ./.github/actions/k3d + + - name: Run e2e tests against current tainted cluster run: | - make e2e-test-with-cluster + make test-e2e-with-tainted-cluster-local + + - name: Upload coverage reports to Codecov + uses: codecov/codecov-action@v3 diff --git a/.github/workflows/pull_request.yaml b/.github/workflows/pull_request.yaml index 8547113e..9332c0f0 100644 --- a/.github/workflows/pull_request.yaml +++ b/.github/workflows/pull_request.yaml @@ -17,8 +17,10 @@ jobs: run: | (cd ./chart && helm dep update .) helm lint ./chart --with-subcharts + - name: Test run: | go test -coverprofile=coverage.txt -covermode=atomic -v ./src/controllers/... + - name: Upload coverage reports to Codecov uses: codecov/codecov-action@v3 diff --git a/Makefile b/Makefile index 5d2b985c..04ba0a82 100644 --- a/Makefile +++ b/Makefile @@ -93,7 +93,7 @@ help: ## Display this help. 
.PHONY: manifests manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. - $(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases + $(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=src/config/crd/bases .PHONY: generate generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. @@ -107,15 +107,75 @@ fmt: ## Run go fmt against code. vet: ## Run go vet against code. go vet ./... +.PHONY: install-flux-prereq +install-flux-prereq: ## Install the flux CLI if not present. + which flux || curl -s https://fluxcd.io/install.sh | sudo bash + +.PHONY: install-fluxcd-controllers +install-fluxcd-controllers: install-flux-prereq ## Install the fluxcd controllers. + flux install --namespace=flux-system --components="source-controller,helm-controller" --network-policy=false --insecure-skip-tls-verify + +.PHONY: install-fluxcd-controllers-with-toleration +install-fluxcd-controllers-with-toleration: install-flux-prereq ## Install the fluxcd controllers with tolerations. + flux install --namespace=flux-system --components="source-controller,helm-controller" --toleration-keys="testkey" --network-policy=false --insecure-skip-tls-verify + +.PHONY: start-test-k3d +start-test-k3d: ## Start a k3d cluster for testing. + k3d cluster create basic --agents=1 + $(MAKE) install-fluxcd-controllers + +.PHONY: start-test-minikube +start-test-minikube: ## Start a minikube cluster for testing. + minikube start --addons default-storageclass,storage-provisioner --driver=docker + kubectl taint nodes minikube testkey- || true + kubectl label nodes minikube testkey- || true + $(MAKE) install-fluxcd-controllers + +.PHONY: stop-test-minikube +stop-test-minikube: ## Stop the minikube cluster for testing. + minikube stop + +.PHONY: start-test-minikube-tainted +start-test-minikube-tainted: ## Start a minikube cluster with a tainted node for testing. + minikube start --addons default-storageclass,storage-provisioner,hostpath --driver=docker + sh ./hack/minikube-patch-pod-tolerations.sh + kubectl taint nodes minikube testkey=testvalue:NoSchedule || true + kubectl label nodes minikube testkey=testvalue || true + $(MAKE) install-fluxcd-controllers-with-toleration + sh ./hack/minikube-patch-workload-tolerations.sh + +.PHONY: stop-test-k3d +stop-test-k3d: ## Stop the k3d cluster for testing. + k3d cluster delete basic + +.PHONY: start-test-k3d-tainted +start-test-k3d-tainted: ## Start a k3d cluster with a tainted node for testing. + k3d cluster create tainted --agents=1 --k3s-arg="--kubelet-arg=node-labels=testkey=testvalue@agent:0" --k3s-arg="--kubelet-arg=taints=testkey=testvalue:NoSchedule@agent:0" + $(MAKE) install-fluxcd-controllers + +.PHONY: stop-test-k3d-tainted +stop-test-k3d-tainted: ## Stop the k3d cluster with a tainted node for testing. + k3d cluster delete tainted + ##@ Test -.PHONY: e2e-test-without-cluster -e2e-test-without-cluster: manifests generate fmt vet envtest ## Run test. - KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test ./... -coverprofile cover.out +.PHONY: test-e2e-without-cluster +test-e2e-without-cluster: manifests generate fmt vet envtest ## Run test. + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test ./...
-coverprofile=coverage.txt + +.PHONY: test-e2e-with-cluster +test-e2e-with-cluster: manifests generate fmt vet envtest ## Run test. + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" ENVTEST_REMOTE=true go test ./... -coverprofile=coverage.txt -v + +.PHONY: test-e2e-with-cluster-local +test-e2e-with-cluster-local: start-test-minikube test-e2e-with-cluster ## Run test. + +.PHONY: test-e2e-with-tainted-cluster +test-e2e-with-tainted-cluster: manifests generate fmt vet envtest ## Run test. + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" ENVTEST_REMOTE=true E2E_ARG_IS_TAINTED=true go test ./... -coverprofile=coverage.txt -v -.PHONY: e2e-test-with-cluster -e2e-test-with-cluster: manifests generate fmt vet envtest ## Run test. - KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" ENVTEST_REMOTE=true go test ./... -coverprofile cover.out -v +.PHONY: test-e2e-with-tainted-cluster-local +test-e2e-with-tainted-cluster-local: start-test-minikube-tainted test-e2e-with-tainted-cluster ## Run test. ##@ Build diff --git a/chart/templates/uffizziclusters.uffizzi.com_customresourcedefinition.yaml b/chart/templates/uffizziclusters.uffizzi.com_customresourcedefinition.yaml index 8832a205..23a235b1 100644 --- a/chart/templates/uffizziclusters.uffizzi.com_customresourcedefinition.yaml +++ b/chart/templates/uffizziclusters.uffizzi.com_customresourcedefinition.yaml @@ -148,6 +148,10 @@ spec: type: object manifests: type: string + nodeSelector: + additionalProperties: + type: string + type: object resourceQuota: description: UffizziClusterResourceQuota defines the resource quota which defines the quota of resources a namespace has access to properties: @@ -199,7 +203,7 @@ spec: default: 1Gi type: string storage: - default: 5Gi + default: 10Gi type: string type: object services: @@ -216,8 +220,37 @@ spec: type: object sleep: type: boolean - ttl: - type: string + storage: + properties: + persistence: + default: true + type: boolean + size: + default: 5Gi + type: string + type: object + tolerations: + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. 
+ type: string + type: object + type: array type: object status: description: UffizziClusterStatus defines the observed state of UffizziCluster diff --git a/config/crd/bases/uffizzi.com_uffizziclusters.yaml b/config/crd/bases/uffizzi.com_uffizziclusters.yaml deleted file mode 100644 index 500cc20b..00000000 --- a/config/crd/bases/uffizzi.com_uffizziclusters.yaml +++ /dev/null @@ -1,345 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.11.1 - creationTimestamp: null - name: uffizziclusters.uffizzi.com -spec: - group: uffizzi.com - names: - kind: UffizziCluster - listKind: UffizziClusterList - plural: uffizziclusters - shortNames: - - uc - - ucluster - singular: uffizzicluster - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .status.conditions[?(@.type=='APIReady')].status - name: APIReady - type: string - - jsonPath: .status.conditions[?(@.type=='DataStoreReady')].status - name: DataStoreReady - type: string - - jsonPath: .status.conditions[?(@.type=='Ready')].status - name: Ready - type: string - - jsonPath: .status.conditions[?(@.type=='Sleep')].status - name: Sleep - type: string - - jsonPath: .status.host - name: Host - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - jsonPath: .status.lastAwakeTime - name: UptimeSinceLastAwake - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: UffizziCluster is the Schema for the UffizziClusters API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: UffizziClusterSpec defines the desired state of UffizziCluster - properties: - apiServer: - description: UffizziClusterAPIServer defines the API server capabilities - of the cluster - properties: - image: - type: string - type: object - distro: - default: k3s - enum: - - k3s - - k8s - type: string - externalDatastore: - default: sqlite - enum: - - etcd - - sqlite - type: string - helm: - items: - properties: - chart: - properties: - name: - type: string - repo: - type: string - version: - type: string - required: - - name - - repo - type: object - release: - properties: - name: - type: string - namespace: - type: string - required: - - name - - namespace - type: object - values: - type: string - required: - - chart - - release - type: object - type: array - ingress: - description: UffiClusterIngress defines the ingress capabilities of - the cluster, the basic host can be setup for all - properties: - class: - type: string - host: - type: string - type: object - limitRange: - properties: - default: - properties: - cpu: - default: "0.5" - type: string - ephemeralStorage: - default: 8Gi - type: string - memory: - default: 1Gi - type: string - type: object - defaultRequest: - properties: - cpu: - default: "0.1" - type: string - ephemeralStorage: - default: 1Gi - type: string - memory: - default: 128Mi - type: string - type: object - enabled: - default: true - type: boolean - required: - - enabled - type: object - manifests: - type: string - provider: - default: vanila - enum: - - vanila - - gke - - eks - type: string - resourceQuota: - description: UffizziClusterResourceQuota defines the resource quota - which defines the quota of resources a namespace has access to - properties: - count: - properties: - configMaps: - default: 20 - type: integer - endpoints: - default: 10 - type: integer - persistentVolumeClaims: - default: 10 - type: integer - pods: - default: 20 - type: integer - secrets: - default: 20 - type: integer - services: - default: 10 - type: integer - type: object - enabled: - default: true - type: boolean - limits: - properties: - cpu: - default: "0.5" - type: string - ephemeralStorage: - default: 5Gi - type: string - memory: - default: 8Gi - type: string - type: object - requests: - properties: - cpu: - default: "0.5" - type: string - ephemeralStorage: - default: 5Gi - type: string - memory: - default: 1Gi - type: string - storage: - default: 5Gi - type: string - type: object - services: - properties: - loadBalancers: - default: 3 - type: integer - nodePorts: - default: 0 - type: integer - type: object - required: - - enabled - type: object - sleep: - type: boolean - ttl: - type: string - type: object - status: - description: UffizziClusterStatus defines the observed state of UffizziCluster - properties: - conditions: - items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. 
// Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - helmReleaseRef: - type: string - host: - type: string - kubeConfig: - description: VClusterKubeConfig is the KubeConfig SecretReference - of the related VCluster - properties: - secretRef: - description: SecretKeyReference contains enough information to - locate the referenced Kubernetes Secret object in the same namespace. - Optionally a key can be specified. Use this type instead of - core/v1 SecretKeySelector when the Key is optional and the Optional - field is not applicable. - properties: - key: - description: Key in the Secret, when not specified an implementation-specific - default key is used. - type: string - name: - description: Name of the Secret. 
- type: string - required: - - name - type: object - type: object - lastAppliedConfiguration: - type: string - lastAppliedHelmReleaseSpec: - type: string - lastAwakeTime: - format: date-time - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml deleted file mode 100644 index dde522ed..00000000 --- a/config/rbac/role.yaml +++ /dev/null @@ -1,169 +0,0 @@ ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - creationTimestamp: null - name: manager-role -rules: -- apiGroups: - - "" - resources: - - pods - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - secrets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - services - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - apps - resources: - - deployments - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - apps - resources: - - statefulsets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - helm.toolkit.fluxcd.io - resources: - - helmreleases - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - helm.toolkit.fluxcd.io - resources: - - helmreleases/finalizers - verbs: - - update -- apiGroups: - - helm.toolkit.fluxcd.io - resources: - - helmreleases/status - verbs: - - get - - patch - - update -- apiGroups: - - networking.k8s.io - resources: - - ingresses - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - networking.k8s.io - resources: - - networkpolicies - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - source.toolkit.fluxcd.io - resources: - - helmrepositories - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - source.toolkit.fluxcd.io - resources: - - helmrepositories/finalizers - verbs: - - update -- apiGroups: - - source.toolkit.fluxcd.io - resources: - - helmrepositories/status - verbs: - - get - - patch - - update -- apiGroups: - - uffizzi.com - resources: - - uffizziclusters - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - uffizzi.com - resources: - - uffizziclusters/finalizers - verbs: - - update -- apiGroups: - - uffizzi.com - resources: - - uffizziclusters/status - verbs: - - get - - patch - - update diff --git a/examples/k3s-nodeselector-tolerations.yml b/examples/k3s-nodeselector-tolerations.yml new file mode 100644 index 00000000..98962f52 --- /dev/null +++ b/examples/k3s-nodeselector-tolerations.yml @@ -0,0 +1,13 @@ +kind: UffizziCluster +apiVersion: uffizzi.com/v1alpha1 +metadata: + name: nodeselector-tolerations +spec: + nodeSelector: + testkey: testvalue + tolerations: + - key: "testkey" + operator: "Equal" + value: "testvalue" + effect: "NoSchedule" + diff --git a/hack/minikube-patch-pod-tolerations.sh b/hack/minikube-patch-pod-tolerations.sh new file mode 100755 index 00000000..24cbd8ca --- /dev/null +++ b/hack/minikube-patch-pod-tolerations.sh @@ -0,0 +1,5 @@ +kubectl get pods --all-namespaces -o jsonpath="{range .items[*]}{.metadata.namespace}{' '}{.metadata.name}{'\n'}{end}" | while read -r line; do + namespace=$(echo "$line" | cut -d' ' -f1) + pod=$(echo "$line" | cut -d' ' -f2) + kubectl patch pod 
"$pod" -n "$namespace" --type='json' -p='[{"op": "add", "path": "/spec/tolerations", "value": [{"key": "testkey", "operator": "Equal", "value": "testvalue", "effect": "NoSchedule"}]}]' +done \ No newline at end of file diff --git a/hack/minikube-patch-workload-tolerations.sh b/hack/minikube-patch-workload-tolerations.sh new file mode 100755 index 00000000..4b0fac1b --- /dev/null +++ b/hack/minikube-patch-workload-tolerations.sh @@ -0,0 +1,11 @@ +kubectl get deployments --all-namespaces -o jsonpath="{range .items[*]}{.metadata.namespace}{' '}{.metadata.name}{'\n'}{end}" | while read -r line; do + namespace=$(echo "$line" | cut -d' ' -f1) + deployment=$(echo "$line" | cut -d' ' -f2) + kubectl patch deployment "$deployment" --patch-file="./.github/kubectl-patch/nodeselector-toleration.yaml" -n "$namespace" +done + +kubectl get statefulset --all-namespaces -o jsonpath="{range .items[*]}{.metadata.namespace}{' '}{.metadata.name}{'\n'}{end}" | while read -r line; do + namespace=$(echo "$line" | cut -d' ' -f1) + statefulset=$(echo "$line" | cut -d' ' -f2) + kubectl patch statefulset "$statefulset" --patch-file="./.github/kubectl-patch/nodeselector-toleration.yaml" -n "$namespace" +done diff --git a/kuttl-test.yaml b/kuttl-test.yaml deleted file mode 100644 index 4f8a81f1..00000000 --- a/kuttl-test.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestSuite -testDirs: - - ./test/e2e/ -startKIND: false -timeout: 120 -skipDelete: false diff --git a/main.go b/main.go index d1cd083c..00ee1f94 100644 --- a/main.go +++ b/main.go @@ -105,7 +105,7 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "UffizziCluster") os.Exit(1) } - // Setup UffizziClusterReconciler + // Setup UffizziClusterEtcdReconciler if err = (&etcd.UffizziClusterEtcdReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), diff --git a/src/api/v1alpha1/uffizzicluster_types.go b/src/api/v1alpha1/uffizzicluster_types.go index 30200ab2..bc488e71 100644 --- a/src/api/v1alpha1/uffizzicluster_types.go +++ b/src/api/v1alpha1/uffizzicluster_types.go @@ -18,6 +18,7 @@ package v1alpha1 import ( "github.com/fluxcd/pkg/apis/meta" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -86,7 +87,7 @@ type UffizziClusterRequestsQuota struct { Memory string `json:"memory,omitempty"` //+kubebuilder:default:="5Gi" EphemeralStorage string `json:"ephemeralStorage,omitempty"` - //+kubebuilder:default:="5Gi" + //+kubebuilder:default:="10Gi" Storage string `json:"storage,omitempty"` } @@ -139,23 +140,28 @@ type UffizziClusterResourceCount struct { Endpoints int `json:"endpoints,omitempty"` } +type UffizziClusterStorage struct { + //+kubebuilder:default:=true + Persistence bool `json:"persistence,omitempty"` + //+kubebuilder:default:="5Gi" + Size string `json:"size,omitempty"` +} + // UffizziClusterSpec defines the desired state of UffizziCluster type UffizziClusterSpec struct { //+kubebuilder:default:="k3s" //+kubebuilder:validation:Enum=k3s;k8s - Distro string `json:"distro,omitempty"` - //+kubebuilder:default:="vanila" - //+kubebuilder:validation:Enum=vanila;gke;eks - Provider string `json:"provider,omitempty"` + Distro string `json:"distro,omitempty"` + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + Toleration []v1.Toleration `json:"tolerations,omitempty"` APIServer UffizziClusterAPIServer `json:"apiServer,omitempty"` Ingress UffizziClusterIngress `json:"ingress,omitempty"` - TTL string `json:"ttl,omitempty"` Helm []HelmChart `json:"helm,omitempty"` Manifests 
*string `json:"manifests,omitempty"` ResourceQuota *UffizziClusterResourceQuota `json:"resourceQuota,omitempty"` LimitRange *UffizziClusterLimitRange `json:"limitRange,omitempty"` Sleep bool `json:"sleep,omitempty"` - + Storage *UffizziClusterStorage `json:"storage,omitempty"` //+kubebuilder:default:="sqlite" //+kubebuilder:validation:Enum=etcd;sqlite ExternalDatastore string `json:"externalDatastore,omitempty"` diff --git a/src/api/v1alpha1/zz_generated.deepcopy.go b/src/api/v1alpha1/zz_generated.deepcopy.go index da3bff52..6ddbceaf 100644 --- a/src/api/v1alpha1/zz_generated.deepcopy.go +++ b/src/api/v1alpha1/zz_generated.deepcopy.go @@ -23,7 +23,8 @@ package v1alpha1 import ( "github.com/fluxcd/pkg/apis/meta" - "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -307,6 +308,20 @@ func (in *UffizziClusterServicesQuota) DeepCopy() *UffizziClusterServicesQuota { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *UffizziClusterSpec) DeepCopyInto(out *UffizziClusterSpec) { *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Toleration != nil { + in, out := &in.Toleration, &out.Toleration + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } out.APIServer = in.APIServer out.Ingress = in.Ingress if in.Helm != nil { @@ -329,6 +344,11 @@ func (in *UffizziClusterSpec) DeepCopyInto(out *UffizziClusterSpec) { *out = new(UffizziClusterLimitRange) **out = **in } + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = new(UffizziClusterStorage) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UffizziClusterSpec. @@ -346,7 +366,7 @@ func (in *UffizziClusterStatus) DeepCopyInto(out *UffizziClusterStatus) { *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make([]v1.Condition, len(*in)) + *out = make([]metav1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -385,6 +405,21 @@ func (in *UffizziClusterStatus) DeepCopy() *UffizziClusterStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UffizziClusterStorage) DeepCopyInto(out *UffizziClusterStorage) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UffizziClusterStorage. +func (in *UffizziClusterStorage) DeepCopy() *UffizziClusterStorage { + if in == nil { + return nil + } + out := new(UffizziClusterStorage) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *VClusterIngressSpec) DeepCopyInto(out *VClusterIngressSpec) { *out = *in diff --git a/src/config/crd/bases/uffizzi.com_uffizziclusters.yaml b/src/config/crd/bases/uffizzi.com_uffizziclusters.yaml index 0df7493c..57c91498 100644 --- a/src/config/crd/bases/uffizzi.com_uffizziclusters.yaml +++ b/src/config/crd/bases/uffizzi.com_uffizziclusters.yaml @@ -154,6 +154,10 @@ spec: type: object manifests: type: string + nodeSelector: + additionalProperties: + type: string + type: object resourceQuota: description: UffizziClusterResourceQuota defines the resource quota which defines the quota of resources a namespace has access to @@ -206,7 +210,7 @@ spec: default: 1Gi type: string storage: - default: 5Gi + default: 10Gi type: string type: object services: @@ -223,8 +227,54 @@ spec: type: object sleep: type: boolean - ttl: - type: string + storage: + properties: + persistence: + default: true + type: boolean + size: + default: 5Gi + type: string + type: object + tolerations: + items: + description: The pod this Toleration is attached to tolerates any + taint that matches the triple using the matching + operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty + means match all taint effects. When specified, allowed values + are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, + operator must be Exists; this combination means to match all + values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the + value. Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod + can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time + the toleration (which must be of effect NoExecute, otherwise + this field is ignored) tolerates the taint. By default, it + is not set, which means tolerate the taint forever (do not + evict). Zero and negative values will be treated as 0 (evict + immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches + to. If the operator is Exists, the value should be empty, + otherwise just a regular string. 
+ type: string + type: object + type: array type: object status: description: UffizziClusterStatus defines the observed state of UffizziCluster diff --git a/src/controllers/etcd/etcd_controller.go b/src/controllers/etcd/etcd_controller.go index 03e9f50b..99825a87 100644 --- a/src/controllers/etcd/etcd_controller.go +++ b/src/controllers/etcd/etcd_controller.go @@ -18,11 +18,13 @@ package etcd import ( "context" + "fmt" uclusteruffizzicomv1alpha1 "github.com/UffizziCloud/uffizzi-cluster-operator/src/api/v1alpha1" "github.com/UffizziCloud/uffizzi-cluster-operator/src/pkg/constants" fluxhelmv2beta1 "github.com/fluxcd/helm-controller/api/v2beta1" k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/handler" @@ -37,9 +39,6 @@ type UffizziClusterEtcdReconciler struct { } func (r *UffizziClusterEtcdReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - // check if the cluster is a k3s cluster - // if it is, then we need to create the etcd cluster - // default lifecycle operation logger := log.FromContext(ctx) // ---------------------- @@ -62,21 +61,21 @@ func (r *UffizziClusterEtcdReconciler) Reconcile(ctx context.Context, req ctrl.R // create a helm release for the etcd cluster // check if the helm release exists helmRelease := &fluxhelmv2beta1.HelmRelease{} - err = r.Get(ctx, client.ObjectKey{ + err = r.Get(ctx, types.NamespacedName{ Namespace: uCluster.Namespace, Name: BuildEtcdHelmReleaseName(uCluster), }, helmRelease) if err != nil { - // if the helm release does not exist, create it - _, err = r.upsertETCDHelmRelease(ctx, uCluster) - if err != nil { - return ctrl.Result{}, err + if k8serrors.IsNotFound(err) { + // if the helm release does not exist, create it + if _, err = r.upsertETCDHelmRelease(ctx, uCluster); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to create HelmRelease: %w", err) + } + } else { + return ctrl.Result{}, fmt.Errorf("failed to get HelmRelease: %w", err) } - } else { - return ctrl.Result{}, nil } } - return ctrl.Result{}, nil } diff --git a/src/controllers/etcd/helm.go b/src/controllers/etcd/helm.go index b02d1f7d..25a4d49a 100644 --- a/src/controllers/etcd/helm.go +++ b/src/controllers/etcd/helm.go @@ -11,6 +11,7 @@ import ( "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ctrl "sigs.k8s.io/controller-runtime" + "time" ) func (r *UffizziClusterEtcdReconciler) createHelmRepo(ctx context.Context, name, namespace, url string) error { @@ -21,8 +22,10 @@ func (r *UffizziClusterEtcdReconciler) createHelmRepo(ctx context.Context, name, Namespace: namespace, }, Spec: fluxsourcev1.HelmRepositorySpec{ - URL: url, - Type: constants.OCI_TYPE, + Interval: metav1.Duration{Duration: time.Minute * 5}, + Timeout: &metav1.Duration{Duration: time.Second * 60}, + URL: url, + Type: constants.OCI_TYPE, }, } diff --git a/src/controllers/uffizzicluster/conditions.go b/src/controllers/uffizzicluster/conditions.go index 1736fe42..77e93cd8 100644 --- a/src/controllers/uffizzicluster/conditions.go +++ b/src/controllers/uffizzicluster/conditions.go @@ -51,6 +51,18 @@ func GetAllNotReadyConditions() []metav1.Condition { } } +func GetAllSleepConditions() []metav1.Condition { + return append([]metav1.Condition{ + Sleeping(metav1.Now()), + }, GetAllNotReadyConditions()...) 
+} + +func GetAllAwokenConditions() []metav1.Condition { + return append([]metav1.Condition{ + Awoken(metav1.Now()), + }, GetAllReadyConditions()...) +} + func Initializing() metav1.Condition { return metav1.Condition{ Type: TypeReady, diff --git a/src/controllers/uffizzicluster/helm.go b/src/controllers/uffizzicluster/helm.go index c2b65ad9..9107d2c5 100644 --- a/src/controllers/uffizzicluster/helm.go +++ b/src/controllers/uffizzicluster/helm.go @@ -10,10 +10,12 @@ import ( fluxhelmv2beta1 "github.com/fluxcd/helm-controller/api/v2beta1" fluxsourcev1 "github.com/fluxcd/source-controller/api/v1beta2" "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "time" ) func (r *UffizziClusterReconciler) createLoftHelmRepo(ctx context.Context, req ctrl.Request) error { @@ -215,6 +217,12 @@ func (r *UffizziClusterReconciler) createHelmRepo(ctx context.Context, name, nam }, Spec: fluxsourcev1.HelmRepositorySpec{ URL: url, + Interval: metav1.Duration{ + Duration: time.Minute * 5, + }, + Timeout: &metav1.Duration{ + Duration: time.Second * 60, + }, }, } diff --git a/src/controllers/uffizzicluster/uffizzicluster_controller.go b/src/controllers/uffizzicluster/uffizzicluster_controller.go index b437b9ce..876d63b5 100644 --- a/src/controllers/uffizzicluster/uffizzicluster_controller.go +++ b/src/controllers/uffizzicluster/uffizzicluster_controller.go @@ -252,7 +252,7 @@ func (r *UffizziClusterReconciler) Reconcile(ctx context.Context, req ctrl.Reque // ---------------------- if err := r.reconcileSleepState(ctx, uCluster); err != nil { if k8serrors.IsNotFound(err) { - logger.Info("vcluster statefulset not found, will check again in the next round") + logger.Info("vcluster workload not found, will check again in the next round") return ctrl.Result{}, nil } // cluster did not sleep diff --git a/src/pkg/constants/constants.go b/src/pkg/constants/constants.go index a2de30c8..fd77f014 100644 --- a/src/pkg/constants/constants.go +++ b/src/pkg/constants/constants.go @@ -9,23 +9,22 @@ const ( BITNAMI_HELM_REPO = "bitnami" VCLUSTER_CHART_K3S = "vcluster" VCLUSTER_CHART_K8S = "vcluster-k8s" - VCLUSTER_CHART_K3S_VERSION = "0.16.4" - VCLUSTER_CHART_K8S_VERSION = "0.16.4" + VCLUSTER_CHART_K3S_VERSION = "0.19.4" + VCLUSTER_CHART_K8S_VERSION = "0.19.4" ETCD_CHART = "etcd" ETCD_CHART_VERSION = "9.5.6" BITNAMI_CHART_REPO_URL = "oci://registry-1.docker.io/bitnamicharts" LOFT_CHART_REPO_URL = "https://charts.loft.sh" VCLUSTER_K3S_DISTRO = "k3s" VCLUSTER_K8S_DISTRO = "k8s" - PROVIDER_NOTHING = "vanila" - PROVIDER_GKE = "gke" - PROVIDER_EKS = "eks" + NODESELECTOR_GKE = "gvisor" K3S_DATASTORE_ENDPOINT = "K3S_DATASTORE_ENDPOINT" VCLUSTER_INGRESS_HOSTNAME = "VCLUSTER_INGRESS_HOST" DEFAULT_K3S_VERSION = "rancher/k3s:v1.27.3-k3s1" UCLUSTER_SYNC_PLUGIN_TAG = "uffizzi/ucluster-sync-plugin:v0.2.4" OCI_TYPE = "oci" PREMIUM_RWO_STORAGE_CLASS = "premium-rwo" + STANDARD_STORAGE_CLASS = "standard" SANDBOX_GKE_IO_RUNTIME = "sandbox.gke.io/runtime" GVISOR = "gvisor" VCLUSTER_MANAGED_BY_KEY = "vcluster.loft.sh/managed-by" diff --git a/src/pkg/helm/build/etcd/build.go b/src/pkg/helm/build/etcd/build.go index 2828cd87..87ba43bf 100644 --- a/src/pkg/helm/build/etcd/build.go +++ b/src/pkg/helm/build/etcd/build.go @@ -4,13 +4,12 @@ import ( "github.com/UffizziCloud/uffizzi-cluster-operator/src/pkg/constants" helmtypes 
"github.com/UffizziCloud/uffizzi-cluster-operator/src/pkg/helm/types" etcdhelmtypes "github.com/UffizziCloud/uffizzi-cluster-operator/src/pkg/helm/types/etcd" - v1 "k8s.io/api/core/v1" ) func BuildETCDHelmValues() etcdhelmtypes.Etcd { return etcdhelmtypes.Etcd{ Global: etcdhelmtypes.Global{ - StorageClass: constants.PREMIUM_RWO_STORAGE_CLASS, + StorageClass: constants.STANDARD_STORAGE_CLASS, }, ReplicaCount: 1, ReadinessProbe: etcdhelmtypes.ReadinessProbe{ @@ -20,16 +19,16 @@ func BuildETCDHelmValues() etcdhelmtypes.Etcd { Persistence: etcdhelmtypes.Persistence{ Size: "10Gi", }, - Tolerations: []etcdhelmtypes.Toleration{ - { - Effect: string(v1.TaintEffectNoSchedule), - Key: constants.SANDBOX_GKE_IO_RUNTIME, - Operator: string(v1.NodeSelectorOpExists), - }, - }, - NodeSelector: etcdhelmtypes.NodeSelector{ - SandboxGKEIORuntime: constants.GVISOR, - }, + //Tolerations: []etcdhelmtypes.Toleration{ + // { + // Effect: string(v1.TaintEffectNoSchedule), + // Key: constants.SANDBOX_GKE_IO_RUNTIME, + // Operator: string(v1.NodeSelectorOpExists), + // }, + //}, + //NodeSelector: etcdhelmtypes.NodeSelector{ + // SandboxGKEIORuntime: constants.GVISOR, + //}, Auth: etcdhelmtypes.Auth{ Rbac: etcdhelmtypes.Rbac{ Create: false, diff --git a/src/pkg/helm/build/vcluster/build.go b/src/pkg/helm/build/vcluster/build.go index 3a7d2637..d8edd383 100644 --- a/src/pkg/helm/build/vcluster/build.go +++ b/src/pkg/helm/build/vcluster/build.go @@ -1,7 +1,6 @@ package vcluster import ( - "fmt" "github.com/UffizziCloud/uffizzi-cluster-operator/src/api/v1alpha1" "github.com/UffizziCloud/uffizzi-cluster-operator/src/controllers/etcd" "github.com/UffizziCloud/uffizzi-cluster-operator/src/pkg/constants" @@ -15,11 +14,9 @@ func BuildK3SHelmValues(uCluster *v1alpha1.UffizziCluster) (vcluster.K3S, string vclusterK3sHelmValues := vcluster.K3S{ VCluster: k3SAPIServer(uCluster), - Common: common(helmReleaseName, vclusterIngressHostname, uCluster.Spec.Provider), + Common: common(helmReleaseName, vclusterIngressHostname, uCluster.Spec.NodeSelector, uCluster.Spec.Toleration), } - // keep cluster data intact in case the vcluster scales up or down - vclusterK3sHelmValues.Storage.Persistence = true if uCluster.Spec.ExternalDatastore == constants.ETCD { vclusterK3sHelmValues.VCluster.Env = []vcluster.ContainerEnv{ { @@ -27,7 +24,6 @@ func BuildK3SHelmValues(uCluster *v1alpha1.UffizziCluster) (vcluster.K3S, string Value: "http://" + etcd.BuildEtcdHelmReleaseName(uCluster) + "." 
+ uCluster.Namespace + ".svc.cluster.local:2379", }, } - vclusterK3sHelmValues.EnableHA = false } if uCluster.Spec.Ingress.Host != "" { @@ -86,10 +82,42 @@ func BuildK3SHelmValues(uCluster *v1alpha1.UffizziCluster) (vcluster.K3S, string vclusterK3sHelmValues.Isolation.LimitRange = lrHelmValues } - vclusterK3sHelmValues.Syncer.ExtraArgs = append(vclusterK3sHelmValues.Syncer.ExtraArgs, - "--tls-san="+vclusterIngressHostname, - "--out-kube-config-server="+outKubeConfigServerArgValue, - ) + if vclusterIngressHostname != "" { + vclusterK3sHelmValues.Syncer.ExtraArgs = append(vclusterK3sHelmValues.Syncer.ExtraArgs, + "--tls-san="+vclusterIngressHostname, + ) + } + + if outKubeConfigServerArgValue != "" { + vclusterK3sHelmValues.Syncer.ExtraArgs = append(vclusterK3sHelmValues.Syncer.ExtraArgs, + "--out-kube-config-server="+outKubeConfigServerArgValue, + ) + } + + for _, t := range uCluster.Spec.Toleration { + vclusterK3sHelmValues.Syncer.ExtraArgs = append(vclusterK3sHelmValues.Syncer.ExtraArgs, "--enforce-toleration="+vcluster.Toleration(t).Notation()) + } + + if len(uCluster.Spec.NodeSelector) > 0 { + for k, v := range uCluster.Spec.NodeSelector { + vclusterK3sHelmValues.Syncer.ExtraArgs = append(vclusterK3sHelmValues.Syncer.ExtraArgs, "--node-selector="+k+"="+v) + } + vclusterK3sHelmValues.Syncer.ExtraArgs = append(vclusterK3sHelmValues.Syncer.ExtraArgs, "--enforce-node-selector") + } + + // keep cluster data intact in case the vcluster scales up or down + vclusterK3sHelmValues.Syncer.Storage = vcluster.Storage{ + Persistence: true, + Size: "5Gi", + } + + if uCluster.Spec.Storage != nil { + storage := uCluster.Spec.Storage + vclusterK3sHelmValues.Syncer.Storage.Persistence = storage.Persistence + if len(uCluster.Spec.Storage.Size) > 0 { + vclusterK3sHelmValues.Syncer.Storage.Size = uCluster.Spec.Storage.Size + } + } if len(uCluster.Spec.Helm) > 0 { vclusterK3sHelmValues.Init.Helm = uCluster.Spec.Helm @@ -107,7 +135,7 @@ func BuildK8SHelmValues(uCluster *v1alpha1.UffizziCluster) (vcluster.K8S, string vclusterHelmValues := vcluster.K8S{ APIServer: k8SAPIServer(), - Common: common(helmReleaseName, vclusterIngressHostname, uCluster.Spec.Provider), + Common: common(helmReleaseName, vclusterIngressHostname, uCluster.Spec.NodeSelector, uCluster.Spec.Toleration), } if uCluster.Spec.APIServer.Image != "" { @@ -170,10 +198,17 @@ func BuildK8SHelmValues(uCluster *v1alpha1.UffizziCluster) (vcluster.K8S, string vclusterHelmValues.Isolation.LimitRange = lrHelmValues } - vclusterHelmValues.Syncer.ExtraArgs = append(vclusterHelmValues.Syncer.ExtraArgs, - "--tls-san="+vclusterIngressHostname, - "--out-kube-config-server="+outKubeConfigServerArgValue, - ) + if vclusterIngressHostname != "" { + vclusterHelmValues.Syncer.ExtraArgs = append(vclusterHelmValues.Syncer.ExtraArgs, + "--tls-san="+vclusterIngressHostname, + ) + } + + if outKubeConfigServerArgValue != "" { + vclusterHelmValues.Syncer.ExtraArgs = append(vclusterHelmValues.Syncer.ExtraArgs, + "--out-kube-config-server="+outKubeConfigServerArgValue, + ) + } if len(uCluster.Spec.Helm) > 0 { vclusterHelmValues.Init.Helm = uCluster.Spec.Helm @@ -213,7 +248,7 @@ func pluginsConfig() vcluster.Plugins { } } -func syncerConfig(helmReleaseName, provider string) vcluster.Syncer { +func syncerConfig(helmReleaseName string) vcluster.Syncer { syncer := vcluster.Syncer{ KubeConfigContextName: helmReleaseName, Limits: types.ContainerMemoryCPU{ @@ -221,19 +256,7 @@ func syncerConfig(helmReleaseName, provider string) vcluster.Syncer { Memory: "1024Mi", }, } - if provider 
== constants.PROVIDER_GKE { - syncer.ExtraArgs = append(syncer.ExtraArgs, []string{ - fmt.Sprintf( - "--enforce-toleration=%s:%s", - constants.SANDBOX_GKE_IO_RUNTIME, - string(v1.TaintEffectNoSchedule), - ), - "--node-selector=sandbox.gke.io/runtime=gvisor", - "--enforce-node-selector", - }...) - } else { - syncer.ExtraArgs = []string{} - } + return syncer } @@ -245,16 +268,6 @@ func syncConfig() vcluster.Sync { } } -func gkeTolerations() []vcluster.Toleration { - return []vcluster.Toleration{ - { - Key: constants.SANDBOX_GKE_IO_RUNTIME, - Effect: string(v1.TaintEffectNoSchedule), - Operator: string(v1.NodeSelectorOpExists), - }, - } -} - func securityContext() vcluster.SecurityContext { return vcluster.SecurityContext{ Capabilities: vcluster.SecurityContextCapabilities{ @@ -272,11 +285,11 @@ func isolation() vcluster.Isolation { Quota: vcluster.ResourceQuotaDefiniton{ RequestsCpu: "2.5", RequestsMemory: "10Gi", - RequestsEphemeralStorage: "15Gi", - RequestsStorage: "10Gi", + RequestsEphemeralStorage: "50Gi", + RequestsStorage: "20Gi", LimitsCpu: "20", LimitsMemory: "30Gi", - LimitsEphemeralStorage: "60Gi", + LimitsEphemeralStorage: "80Gi", ServicesLoadbalancers: 100, ServicesNodePorts: 0, CountEndpoints: 100, @@ -316,12 +329,6 @@ func ingress(VClusterIngressHostname string) vcluster.Ingress { } } -func gkeNodeSelector() vcluster.NodeSelector { - return vcluster.NodeSelector{ - SandboxGKEIORuntime: "gvisor", - } -} - func configStrings(uCluster *v1alpha1.UffizziCluster) (string, string, string) { helmReleaseName := BuildVClusterHelmReleaseName(uCluster) var ( @@ -347,27 +354,20 @@ func k8SAPIServer() vcluster.K8SAPIServer { } } -func common(helmReleaseName, vclusterIngressHostname, provider string) vcluster.Common { +func common(helmReleaseName, vclusterIngressHostname string, nodeSelector map[string]string, tolerations []v1.Toleration) vcluster.Common { c := vcluster.Common{ Init: vcluster.Init{}, FsGroup: 12345, Ingress: ingress(vclusterIngressHostname), Isolation: isolation(), + NodeSelector: nodeSelector, + Tolerations: tolerations, SecurityContext: securityContext(), - Tolerations: gkeTolerations(), Plugin: pluginsConfig(), - Syncer: syncerConfig(helmReleaseName, provider), + Syncer: syncerConfig(helmReleaseName), Sync: syncConfig(), } - if provider == constants.PROVIDER_GKE { - c.NodeSelector = gkeNodeSelector() - c.Tolerations = gkeTolerations() - } else { - c.NodeSelector = vcluster.NodeSelector{} - c.Tolerations = []vcluster.Toleration{} - } - return c } diff --git a/src/pkg/helm/types/vcluster/vcluster.go b/src/pkg/helm/types/vcluster/vcluster.go index cb2fdfd3..43a42520 100644 --- a/src/pkg/helm/types/vcluster/vcluster.go +++ b/src/pkg/helm/types/vcluster/vcluster.go @@ -3,22 +3,21 @@ package vcluster import ( "github.com/UffizziCloud/uffizzi-cluster-operator/src/api/v1alpha1" "github.com/UffizziCloud/uffizzi-cluster-operator/src/pkg/helm/types" + v1 "k8s.io/api/core/v1" ) type Common struct { - Init Init `json:"init,omitempty"` - Syncer Syncer `json:"syncer,omitempty"` - Sync Sync `json:"sync,omitempty"` - Ingress Ingress `json:"ingress,omitempty"` - FsGroup int64 `json:"fsgroup,omitempty"` - Isolation Isolation `json:"isolation,omitempty"` - NodeSelector NodeSelector `json:"nodeSelector,omitempty"` - SecurityContext SecurityContext `json:"securityContext,omitempty"` - Tolerations []Toleration `json:"tolerations,omitempty"` - MapServices MapServices `json:"mapServices,omitempty"` - Plugin Plugins `json:"plugin,omitempty"` - Storage Storage `json:"storage,omitempty"` - EnableHA 
bool `json:"enableHA,omitempty"` + Init Init `json:"init,omitempty"` + Syncer Syncer `json:"syncer,omitempty"` + Sync Sync `json:"sync,omitempty"` + Ingress Ingress `json:"ingress,omitempty"` + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + Tolerations []v1.Toleration `json:"tolerations,omitempty"` + FsGroup int64 `json:"fsgroup,omitempty"` + Isolation Isolation `json:"isolation,omitempty"` + SecurityContext SecurityContext `json:"securityContext,omitempty"` + MapServices MapServices `json:"mapServices,omitempty"` + Plugin Plugins `json:"plugin,omitempty"` } type K3S struct { @@ -35,8 +34,8 @@ type K8SAPIServer struct { Image string `json:"image,omitempty"` ExtraArgs []string `json:"extraArgs,omitempty"` Replicas int32 `json:"replicas,omitempty"` - NodeSelector NodeSelector `json:"nodeSelector,omitempty"` - Tolerations []Toleration `json:"tolerations,omitempty"` + NodeSelector v1.NodeSelector `json:"nodeSelector,omitempty"` + Tolerations []v1.Toleration `json:"tolerations,omitempty"` Labels map[string]string `json:"labels,omitempty"` Annotations map[string]string `json:"annotations,omitempty"` PodAnnotations map[string]string `json:"podAnnotations,omitempty"` @@ -79,6 +78,7 @@ type Syncer struct { KubeConfigContextName string `json:"kubeConfigContextName,omitempty"` ExtraArgs []string `json:"extraArgs,omitempty"` Limits types.ContainerMemoryCPU `json:"limits,omitempty"` + Storage Storage `json:"storage,omitempty"` } type Plugin struct { @@ -188,11 +188,6 @@ type Isolation struct { NetworkPolicy NetworkPolicy `json:"networkPolicy,omitempty"` } -// NodeSelector - parameters to define the node selector of the cluster -type NodeSelector struct { - SandboxGKEIORuntime string `json:"sandbox.gke.io/runtime,omitempty"` -} - type SecurityContextCapabilities struct { Drop []string `json:"drop"` } @@ -205,12 +200,13 @@ type SecurityContext struct { RunAsUser int64 `json:"runAsUser"` } -type Toleration struct { - Effect string `json:"effect"` - Key string `json:"key"` - Operator string `json:"operator"` +type Storage struct { + Persistence bool `json:"persistence"` + Size string `json:"size"` } -type Storage struct { - Persistence bool `json:"persistence"` +type Toleration v1.Toleration + +func (t Toleration) Notation() string { + return t.Key + "=" + t.Value + ":" + string(t.Effect) } diff --git a/src/pkg/utils/exec/exec.go b/src/pkg/utils/exec/exec.go new file mode 100644 index 00000000..f5408f5a --- /dev/null +++ b/src/pkg/utils/exec/exec.go @@ -0,0 +1,144 @@ +// Package exec provides a wrapper around the os/exec package +package exec + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "os" + "os/exec" + "runtime" + "sync" +) + +// SuppressGlobalInterrupt suppresses the global error on an interrupt +var SuppressGlobalInterrupt = false + +// Config is a struct for configuring the Cmd function. +type Config struct { + Print bool + Dir string + Env []string + Stdout io.Writer + Stderr io.Writer +} + +// PrintCfg is a helper function for returning a Config struct with Print set to true. +func PrintCfg() Config { + return Config{Print: true} +} + +// Cmd executes a given command with given config. +func Cmd(command string, args ...string) (string, string, error) { + return CmdWithContext(context.TODO(), Config{}, command, args...) +} + +// CmdWithPrint executes a given command with given config and prints the command. +func CmdWithPrint(command string, args ...string) error { + _, _, err := CmdWithContext(context.TODO(), PrintCfg(), command, args...) 
+ return err +} + +// CmdWithContext executes a given command with given config. +func CmdWithContext(ctx context.Context, config Config, command string, args ...string) (string, string, error) { + if command == "" { + return "", "", errors.New("command is required") + } + + // Set up the command. + cmd := exec.CommandContext(ctx, command, args...) + cmd.Dir = config.Dir + cmd.Env = append(os.Environ(), config.Env...) + + // Capture the command outputs. + cmdStdout, _ := cmd.StdoutPipe() + cmdStderr, _ := cmd.StderrPipe() + + var ( + stdoutBuf, stderrBuf bytes.Buffer + errStdout, errStderr error + wg sync.WaitGroup + ) + + stdoutWriters := []io.Writer{ + &stdoutBuf, + } + + stdErrWriters := []io.Writer{ + &stderrBuf, + } + + // Add the writers if requested. + if config.Stdout != nil { + stdoutWriters = append(stdoutWriters, config.Stdout) + } + + if config.Stderr != nil { + stdErrWriters = append(stdErrWriters, config.Stderr) + } + + // Print to stdout if requested. + if config.Print { + stdoutWriters = append(stdoutWriters, os.Stdout) + stdErrWriters = append(stdErrWriters, os.Stderr) + } + + // Bind all the writers. + stdout := io.MultiWriter(stdoutWriters...) + stderr := io.MultiWriter(stdErrWriters...) + + //// If we're printing, print the command. + //if config.Print { + // message.Command("%s %s", command, strings.Join(args, " ")) + //} + + // Start the command. + if err := cmd.Start(); err != nil { + return "", "", err + } + + // Add to waitgroup for each goroutine. + wg.Add(2) + + // Run a goroutine to capture the command's stdout live. + go func() { + _, errStdout = io.Copy(stdout, cmdStdout) + wg.Done() + }() + + // Run a goroutine to capture the command's stderr live. + go func() { + _, errStderr = io.Copy(stderr, cmdStderr) + wg.Done() + }() + + // Wait for the goroutines to finish (if any). + wg.Wait() + + // Abort if there was an error capturing the command's outputs. + if errStdout != nil { + return "", "", fmt.Errorf("failed to capture the stdout command output: %w", errStdout) + } + if errStderr != nil { + return "", "", fmt.Errorf("failed to capture the stderr command output: %w", errStderr) + } + + // Wait for the command to finish and return the buffered outputs, regardless of whether we printed them. + return stdoutBuf.String(), stderrBuf.String(), cmd.Wait() +} + +// LaunchURL opens a URL in the default browser. +func LaunchURL(url string) error { + switch runtime.GOOS { + case "linux": + return exec.Command("xdg-open", url).Start() + case "windows": + return exec.Command("rundll32", "url.dll,FileProtocolHandler", url).Start() + case "darwin": + return exec.Command("open", url).Start() + } + + return nil +} diff --git a/src/test/e2e/suite_test.go b/src/test/e2e/suite_test.go index 43c3ff6d..d218d4f6 100644 --- a/src/test/e2e/suite_test.go +++ b/src/test/e2e/suite_test.go @@ -19,15 +19,20 @@ package e2e import ( "context" uffizziv1alpha1 "github.com/UffizziCloud/uffizzi-cluster-operator/src/api/v1alpha1" + "github.com/UffizziCloud/uffizzi-cluster-operator/src/controllers/etcd" "github.com/UffizziCloud/uffizzi-cluster-operator/src/controllers/uffizzicluster" + "github.com/UffizziCloud/uffizzi-cluster-operator/src/pkg/utils/exec" + "github.com/UffizziCloud/uffizzi-cluster-operator/src/test/util/resources" fluxhelmv2beta1 "github.com/fluxcd/helm-controller/api/v2beta1" fluxsourcev1beta2 "github.com/fluxcd/source-controller/api/v1beta2" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + "k8s.io/utils/pointer" "math/rand" "os" "path/filepath" ctrl "sigs.k8s.io/controller-runtime" + ctrlcfg "sigs.k8s.io/controller-runtime/pkg/config/v1alpha1" "testing" "k8s.io/client-go/kubernetes/scheme" @@ -43,36 +48,50 @@ import ( // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. var ( - cfg *rest.Config - k8sClient client.Client - testEnv *envtest.Environment - ctx context.Context - cancel context.CancelFunc - useExistingCluster = getEnvtestRemote() + cfg *rest.Config + k8sClient client.Client + testEnv *envtest.Environment + ctx context.Context + cancel context.CancelFunc + e2e UffizziClusterE2E ) +type UffizziClusterE2E struct { + IsTainted bool + UseExistingCluster bool + K8SManager ctrl.Manager +} + +type TestDefinition struct { + Name string + Spec uffizziv1alpha1.UffizziClusterSpec +} + +func (td *TestDefinition) ExecLifecycleTest(ctx context.Context) { + ns := resources.CreateTestNamespace(td.Name) + uc := resources.CreateTestUffizziCluster(td.Name, ns.Name) + uc.Spec = td.Spec + wrapUffizziClusterLifecycleTest(ctx, ns, uc) +} + func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "Controller Suite") } -// write a function to get the value of the ENVTEST_REMOTE environment variable -func getEnvtestRemote() bool { - if os.Getenv("ENVTEST_REMOTE") == "true" { - return true - } - return false -} - var _ = BeforeSuite(func() { logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + e2e = UffizziClusterE2E{ + IsTainted: getTaintedTestClusterEnvVar(), + UseExistingCluster: getEnvtestRemoteEnvVar(), + } By("bootstrapping test environment") testEnv = &envtest.Environment{ CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, ErrorIfCRDPathMissing: true, - UseExistingCluster: &useExistingCluster, + UseExistingCluster: &e2e.UseExistingCluster, } var err error @@ -96,23 +115,7 @@ var _ = BeforeSuite(func() { Expect(err).NotTo(HaveOccurred()) Expect(k8sClient).NotTo(BeNil()) - ctx, cancel = context.WithCancel(context.TODO()) - k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{ - Scheme: scheme.Scheme, - }) - Expect(err).ToNot(HaveOccurred()) - - err = (&uffizzicluster.UffizziClusterReconciler{ - Client: k8sManager.GetClient(), - Scheme: k8sManager.GetScheme(), - }).SetupWithManager(k8sManager) - Expect(err).ToNot(HaveOccurred()) - - go func() { - defer GinkgoRecover() - err = k8sManager.Start(ctx) - Expect(err).ToNot(HaveOccurred(), "failed to run manager") - }() + go e2e.StartReconcilerWithArgs(5) }) var _ = AfterSuite(func() { @@ -127,17 +130,69 @@ func testingSeed() { rand.Seed(12345) } +// Kubectl executes a Kubectl command. +func (e2e *UffizziClusterE2E) Kubectl(args ...string) (string, string, error) { + // run kubectl with args and return stdout, stderr, and error + return exec.CmdWithContext(context.TODO(), exec.PrintCfg(), "kubectl", args...) 
+
+func (e2e *UffizziClusterE2E) StartReconcilerWithArgs(concurrent int) {
+	e2e.K8SManager = NewTestK8SManager(concurrent)
+	err := (&uffizzicluster.UffizziClusterReconciler{
+		Client: e2e.K8SManager.GetClient(),
+		Scheme: e2e.K8SManager.GetScheme(),
+	}).SetupWithManager(e2e.K8SManager)
+	Expect(err).ToNot(HaveOccurred())
+
+	err = (&etcd.UffizziClusterEtcdReconciler{
+		Client: e2e.K8SManager.GetClient(),
+		Scheme: e2e.K8SManager.GetScheme(),
+	}).SetupWithManager(e2e.K8SManager)
+	Expect(err).ToNot(HaveOccurred())
+
+	defer GinkgoRecover()
+	err = e2e.K8SManager.Start(ctx)
+	Expect(err).ToNot(HaveOccurred(), "failed to run manager")
+}
+
+// NewTestK8SManager builds a controller manager that reconciles UffizziClusters
+// with the given concurrency and recovers from reconciler panics.
+func NewTestK8SManager(concurrent int) ctrl.Manager {
+	ctx, cancel = context.WithCancel(context.TODO())
+	k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{
+		Scheme: scheme.Scheme,
+		Controller: ctrlcfg.ControllerConfigurationSpec{
+			GroupKindConcurrency: map[string]int{
+				uffizziv1alpha1.SchemaGroupVersion.WithKind("UffizziCluster").GroupKind().String(): concurrent,
+			},
+			RecoverPanic: pointer.Bool(true),
+		},
+	})
+	Expect(err).ToNot(HaveOccurred())
+	return k8sManager
+}
+
 // CompareSlices compares two slices for equality. It returns true if both slices have the same elements in the same order.
 func compareSlices[T comparable](slice1, slice2 []T) bool {
 	if len(slice1) != len(slice2) {
 		return false
 	}
-
 	for i, v := range slice1 {
 		if v != slice2[i] {
 			return false
 		}
 	}
-
 	return true
 }
+
+func getEnvtestRemoteEnvVar() bool {
+	if os.Getenv("ENVTEST_REMOTE") == "true" {
+		return true
+	}
+	return false
+}
+
+func getTaintedTestClusterEnvVar() bool {
+	if os.Getenv("E2E_ARG_IS_TAINTED") == "true" {
+		return true
+	}
+	return false
+}
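The two environment-variable getters follow the same pattern; if more e2e flags are added later they could be collapsed into a single helper along these lines. This is a sketch only — `boolFromEnv` does not exist in this change — with `getEnvtestRemoteEnvVar()` becoming `boolFromEnv("ENVTEST_REMOTE")` and `getTaintedTestClusterEnvVar()` becoming `boolFromEnv("E2E_ARG_IS_TAINTED")`.

```go
// boolFromEnv reports whether the named environment variable is set to "true".
func boolFromEnv(name string) bool {
	return os.Getenv(name) == "true"
}
```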
"github.com/onsi/gomega" v1 "k8s.io/api/core/v1" @@ -16,8 +15,20 @@ import ( ) func wrapUffizziClusterLifecycleTest(ctx context.Context, ns *v1.Namespace, uc *v1alpha1.UffizziCluster) { - helmRelease := resources.GetHelmReleaseFromUffizziCluster(uc) - helmRepo := resources.GetHelmRepositoryFromUffizziCluster(uc) + var ( + timeout = "10m" + pollingTimeout = "100ms" + helmRelease = resources.GetHelmReleaseFromUffizziCluster(uc) + etcdHelmRelease = resources.GetETCDHelmReleaseFromUffizziCluster(uc) + helmRepo = resources.GetHelmRepositoryFromUffizziCluster(uc) + shouldSucceedQ = Succeed + shouldBeTrueQ = BeTrue + containsAllConditionsQ = func() func(requiredConditions, actualConditions []metav1.Condition) bool { + return conditions.ContainsAllConditions + } + ) + + // defer deletion of the uffizzi cluster and namespace defer Context("When deleting UffizziCluster", func() { It("Should delete the UffizziCluster", func() { By("By deleting the UffizziCluster") @@ -33,33 +44,60 @@ func wrapUffizziClusterLifecycleTest(ctx context.Context, ns *v1.Namespace, uc * It("Should create a UffizziCluster", func() { // By("By Creating Namespace for the UffizziCluster") - Expect(k8sClient.Create(ctx, ns)).Should(Succeed()) + Expect(k8sClient.Create(ctx, ns)).Should(shouldSucceedQ()) // By("By creating a new UffizziCluster") - Expect(k8sClient.Create(ctx, uc)).Should(Succeed()) + Expect(k8sClient.Create(ctx, uc)).Should(shouldSucceedQ()) }) It("Should create a HelmRelease and HelmRepository", func() { // - By("Checking if the HelmRelease was created") + By("Checking if the Loft HelmRepository was created and the repository is ready") Eventually(func() bool { - if err := k8sClient.Get(ctx, resources.CreateNamespacedName(helmRelease.Name, ns.Name), helmRelease); err != nil { + if err := k8sClient.Get(ctx, resources.CreateNamespacedName(constants.LOFT_HELM_REPO, ns.Name), helmRepo); err != nil { return false } + for _, c := range helmRepo.Status.Conditions { + if c.Type == meta.ReadyCondition { + return c.Status == metav1.ConditionTrue + } + } return true }) // - By("Checking if the Loft HelmRepository was created") + By("Checking if the HelmRelease was created") Eventually(func() bool { - if err := k8sClient.Get(ctx, resources.CreateNamespacedName(constants.LOFT_HELM_REPO, ns.Name), helmRepo); err != nil { + if err := k8sClient.Get(ctx, resources.CreateNamespacedName(helmRelease.Name, ns.Name), helmRelease); err != nil { return false } return true }) - }) + if uc.Spec.ExternalDatastore == constants.ETCD { + It("Should create a Bitnami HelmRepository", func() { + // + By("Checking if the Bitnami HelmRepository was created") + Eventually(func() bool { + if err := k8sClient.Get(ctx, resources.CreateNamespacedName(constants.BITNAMI_HELM_REPO, ns.Name), helmRepo); err != nil { + return false + } + return true + }) + }) + It("Should create a HelmRelease for ETCD", func() { + // + By("Checking if the HelmRelease for ETCD was created") + Eventually(func() bool { + if err := k8sClient.Get(ctx, resources.CreateNamespacedName(etcdHelmRelease.Name, ns.Name), etcdHelmRelease); err != nil { + return false + } + return true + }) + }) + } + It("Should initialize correctly", func() { expectedConditions := []metav1.Condition{} uffizziClusterNSN := resources.CreateNamespacedName(uc.Name, ns.Name) @@ -69,10 +107,10 @@ func wrapUffizziClusterLifecycleTest(ctx context.Context, ns *v1.Namespace, uc * return false } expectedConditions = uffizzicluster.GetAllInitializingConditions() - return 
-				return conditions.ContainsAllConditions(expectedConditions, uc.Status.Conditions)
-			}, timeout, pollingTimeout).Should(BeTrue())
-			d := cmp.Diff(expectedConditions, uc.Status.Conditions)
-			GinkgoWriter.Printf(diff.PrintWantGot(d))
+				return containsAllConditionsQ(expectedConditions, uc.Status.Conditions)
+			}, timeout, pollingTimeout).Should(shouldBeTrueQ())
+
+			//GinkgoWriter.Printf(conditions.CreateConditionsCmpDiff(expectedConditions, uc.Status.Conditions))
 		})

 		It("Should be in a Ready State", func() {
@@ -84,10 +122,10 @@ func wrapUffizziClusterLifecycleTest(ctx context.Context, ns *v1.Namespace, uc *v1alpha1.UffizziCluster) {
 					return false
 				}
 				expectedConditions = uffizzicluster.GetAllReadyConditions()
-				return conditions.ContainsAllConditions(expectedConditions, uc.Status.Conditions)
-			}, timeout, pollingTimeout).Should(BeTrue())
-			d := cmp.Diff(expectedConditions, uc.Status.Conditions)
-			GinkgoWriter.Printf(diff.PrintWantGot(d))
+				return containsAllConditionsQ(expectedConditions, uc.Status.Conditions)
+			}, timeout, pollingTimeout).Should(shouldBeTrueQ())
+
+			//GinkgoWriter.Printf(conditions.CreateConditionsCmpDiff(expectedConditions, uc.Status.Conditions))
 		})
 	})

@@ -95,24 +133,21 @@ func wrapUffizziClusterLifecycleTest(ctx context.Context, ns *v1.Namespace, uc *v1alpha1.UffizziCluster) {
 		It("Should put the cluster to sleep", func() {
 			By("By putting the UffizziCluster to sleep")
 			uc.Spec.Sleep = true
-			Expect(k8sClient.Update(ctx, uc)).Should(Succeed())
+			Expect(k8sClient.Update(ctx, uc)).Should(shouldSucceedQ())
 		})

 		It("Should be in a Sleep State", func() {
-			expectedConditions := []metav1.Condition{
-				uffizzicluster.APINotReady(),
-				uffizzicluster.DataStoreNotReady(),
-			}
+			expectedConditions := uffizzicluster.GetAllSleepConditions()
 			uffizziClusterNSN := resources.CreateNamespacedName(uc.Name, ns.Name)
 			By("Check if UffizziCluster has the correct Sleep conditions")
 			Eventually(func() bool {
 				if err := k8sClient.Get(ctx, uffizziClusterNSN, uc); err != nil {
 					return false
 				}
-				return conditions.ContainsAllConditions(expectedConditions, uc.Status.Conditions)
-			}, timeout, pollingTimeout).Should(BeTrue())
-			d := cmp.Diff(expectedConditions, uc.Status.Conditions)
-			GinkgoWriter.Printf(diff.PrintWantGot(d))
+				return containsAllConditionsQ(expectedConditions, uc.Status.Conditions)
+			}, timeout, pollingTimeout).Should(shouldBeTrueQ())
+
+			//GinkgoWriter.Printf(conditions.CreateConditionsCmpDiff(expectedConditions, uc.Status.Conditions))
 		})
 	})

@@ -120,24 +155,21 @@ func wrapUffizziClusterLifecycleTest(ctx context.Context, ns *v1.Namespace, uc *v1alpha1.UffizziCluster) {
 		It("Should wake the cluster up", func() {
 			By("By waking the UffizziCluster up")
 			uc.Spec.Sleep = false
-			Expect(k8sClient.Update(ctx, uc)).Should(Succeed())
+			Expect(k8sClient.Update(ctx, uc)).Should(shouldSucceedQ())
 		})

 		It("Should be Awoken", func() {
-			expectedConditions := []metav1.Condition{
-				uffizzicluster.APIReady(),
-				uffizzicluster.DataStoreReady(),
-			}
+			expectedConditions := uffizzicluster.GetAllAwokenConditions()
 			uffizziClusterNSN := resources.CreateNamespacedName(uc.Name, ns.Name)
 			By("Check if UffizziCluster has the correct Sleep conditions")
 			Eventually(func() bool {
 				if err := k8sClient.Get(ctx, uffizziClusterNSN, uc); err != nil {
 					return false
 				}
-				return conditions.ContainsAllConditions(expectedConditions, uc.Status.Conditions)
-			}, timeout, pollingTimeout).Should(BeTrue())
-			d := cmp.Diff(expectedConditions, uc.Status.Conditions)
-			GinkgoWriter.Printf(diff.PrintWantGot(d))
+				return containsAllConditionsQ(expectedConditions, uc.Status.Conditions)
+			}, timeout, pollingTimeout).Should(shouldBeTrueQ())
+
+			//GinkgoWriter.Printf(conditions.CreateConditionsCmpDiff(expectedConditions, uc.Status.Conditions))
 		})
 	})
 }
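The four condition checks above share the same Get-then-compare shape; a follow-up could extract it into a helper along these lines. This is a sketch only — `eventuallyHasConditions` is not part of this change — and it assumes the suite-level `k8sClient` plus an extra import of `k8s.io/apimachinery/pkg/types` for `types.NamespacedName`.

```go
// eventuallyHasConditions polls the UffizziCluster until every expected
// condition is present, mirroring the assertions in wrapUffizziClusterLifecycleTest.
func eventuallyHasConditions(ctx context.Context, nsn types.NamespacedName, uc *v1alpha1.UffizziCluster, expected func() []metav1.Condition, timeout, polling string) {
	Eventually(func() bool {
		if err := k8sClient.Get(ctx, nsn, uc); err != nil {
			return false
		}
		return conditions.ContainsAllConditions(expected(), uc.Status.Conditions)
	}, timeout, polling).Should(BeTrue())
}
```

With this in place, the Ready check above would reduce to `eventuallyHasConditions(ctx, uffizziClusterNSN, uc, uffizzicluster.GetAllReadyConditions, timeout, pollingTimeout)`.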
diff --git a/src/test/e2e/uffizzicluster_controller_test.go b/src/test/e2e/uffizzicluster_controller_test.go
index b8ce1ef5..35104aa0 100644
--- a/src/test/e2e/uffizzicluster_controller_test.go
+++ b/src/test/e2e/uffizzicluster_controller_test.go
@@ -3,32 +3,81 @@
 import (
 	"context"
 	"github.com/UffizziCloud/uffizzi-cluster-operator/src/api/v1alpha1"
-	"github.com/UffizziCloud/uffizzi-cluster-operator/src/test/util/resources"
+	"github.com/UffizziCloud/uffizzi-cluster-operator/src/pkg/constants"
 	. "github.com/onsi/ginkgo/v2"
+	v1 "k8s.io/api/core/v1"
 )

-type TestDefinition struct {
-	Name string
-	Spec v1alpha1.UffizziClusterSpec
-}
+var _ = Describe("Basic Vanilla K3S UffizziCluster Lifecycle", func() {
+	BeforeEach(func() {
+		if e2e.IsTainted {
+			Skip("Skipping test because cluster is tainted")
+		}
+	})
+	ctx := context.Background()
+	testUffizziCluster := TestDefinition{
+		Name: "basic-k3s-test",
+		Spec: v1alpha1.UffizziClusterSpec{},
+	}
+	testUffizziCluster.ExecLifecycleTest(ctx)
+})

-func (td *TestDefinition) ExecLifecycleTest(ctx context.Context) {
-	ns := resources.CreateTestNamespace(td.Name)
-	uc := resources.CreateTestUffizziCluster(td.Name, ns.Name)
-	wrapUffizziClusterLifecycleTest(ctx, ns, uc)
-}
+var _ = Describe("Basic Vanilla K8S UffizziCluster Lifecycle", func() {
+	BeforeEach(func() {
+		if e2e.IsTainted {
+			Skip("Skipping test because cluster is tainted")
+		}
+	})
+	ctx := context.Background()
+	testUffizziCluster := TestDefinition{
+		Name: "basic-k8s-test",
+		Spec: v1alpha1.UffizziClusterSpec{
+			Distro: "k8s",
+		},
+	}
+	testUffizziCluster.ExecLifecycleTest(ctx)
+})

-const (
-	timeout        = "1m"
-	pollingTimeout = "100ms"
-)
+var _ = Describe("Basic K3S UffizziCluster with ETCD Lifecycle", func() {
+	BeforeEach(func() {
+		if e2e.IsTainted {
+			Skip("Skipping test because cluster is tainted")
+		}
+	})
+	ctx := context.Background()
+	testUffizziCluster := TestDefinition{
+		Name: "k3s-etcd-test",
+		Spec: v1alpha1.UffizziClusterSpec{
+			ExternalDatastore: constants.ETCD,
+		},
+	}
+	testUffizziCluster.ExecLifecycleTest(ctx)
+})

-var _ = Describe("Basic UffizziCluster Lifecycle", func() {
+// Tests against a cluster with tainted nodes - good for exercising nodeSelectors and tolerations.
+
+var _ = Describe("UffizziCluster NodeSelector and Tolerations", func() {
+	BeforeEach(func() {
+		if !e2e.IsTainted {
+			Skip("Skipping test because cluster is not tainted")
+		}
+	})
 	ctx := context.Background()
 	testUffizziCluster := TestDefinition{
-		Name: "basic",
-		Spec: v1alpha1.UffizziClusterSpec{},
+		Name: "k3s-nodeselector-toleration-test",
+		Spec: v1alpha1.UffizziClusterSpec{
+			NodeSelector: map[string]string{
+				"testkey": "testvalue",
+			},
+			Toleration: []v1.Toleration{
+				{
+					Key:      "testkey",
+					Operator: "Equal",
+					Value:    "testvalue",
+					Effect:   "NoSchedule",
+				},
+			},
+		},
 	}
-	// run the testUffizziCluster
 	testUffizziCluster.ExecLifecycleTest(ctx)
 })
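The three non-tainted Describe blocks above differ only in name and spec. If more distributions or datastores are added, they could be generated from a table like the sketch below. This is an illustration only (it is not meant to coexist with the blocks above, since the names would collide); it reuses `TestDefinition`, `e2e.IsTainted`, and `constants.ETCD` from this change.

```go
// A table-driven alternative for the non-tainted lifecycle specs.
var _ = Describe("UffizziCluster Lifecycle (table-driven sketch)", func() {
	BeforeEach(func() {
		if e2e.IsTainted {
			Skip("Skipping test because cluster is tainted")
		}
	})
	ctx := context.Background()
	for _, td := range []TestDefinition{
		{Name: "basic-k3s-test", Spec: v1alpha1.UffizziClusterSpec{}},
		{Name: "basic-k8s-test", Spec: v1alpha1.UffizziClusterSpec{Distro: "k8s"}},
		{Name: "k3s-etcd-test", Spec: v1alpha1.UffizziClusterSpec{ExternalDatastore: constants.ETCD}},
	} {
		// ExecLifecycleTest registers the namespace, cluster, and lifecycle specs
		// for this definition at tree-construction time.
		td.ExecLifecycleTest(ctx)
	}
})
```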
diff --git a/src/test/util/conditions/conditions.go b/src/test/util/conditions/conditions.go
index 3b2311ca..1b6dcef8 100644
--- a/src/test/util/conditions/conditions.go
+++ b/src/test/util/conditions/conditions.go
@@ -3,28 +3,72 @@ package conditions
 import (
 	"github.com/UffizziCloud/uffizzi-cluster-operator/src/test/util/diff"
 	"github.com/google/go-cmp/cmp"
-	"github.com/onsi/ginkgo/v2"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

 // Checks if the required conditions are present and match in the actual conditions slice.
 // Both requiredConditions and actualConditions are slices of metav1.Condition.
-func ContainsAllConditions(requiredConditions, actualConditions []metav1.Condition) bool {
-	for _, requiredCondition := range requiredConditions {
-		found := false
-		for _, actualCondition := range actualConditions {
-			if actualCondition.Type == requiredCondition.Type && actualCondition.Status == requiredCondition.Status {
-				// Add more condition checks here if necessary (e.g., Reason, Message)
-				found = true
-				d := cmp.Diff(requiredConditions, actualConditions)
-				ginkgo.GinkgoWriter.Printf(diff.PrintWantGot(d))
-				break
-			}
-		}
-		if !found {
-			return false
-		}
-	}
-	return true
-}
+// With flip=false the check passes only when every required condition is present;
+// with flip=true it passes only when none of the required conditions are present.
+func ContainsConditionsDecorator(requiredConditions, actualConditions []metav1.Condition, flip bool) bool {
+	for _, requiredCondition := range requiredConditions {
+		found := false
+		for _, actualCondition := range actualConditions {
+			if actualCondition.Type == requiredCondition.Type && actualCondition.Status == requiredCondition.Status {
+				// Add more condition checks here if necessary (e.g., Reason, Message)
+				found = true
+				break
+			}
+		}
+		if found == flip {
+			// flip=false: a required condition is missing, so not all are present.
+			// flip=true: a required condition is present, so not "none" are present.
+			return false
+		}
+	}
+	return true
+}
+
+// ContainsAllConditions returns true if every required condition is present in the actual conditions.
+func ContainsAllConditions(requiredConditions, actualConditions []metav1.Condition) bool {
+	return ContainsConditionsDecorator(requiredConditions, actualConditions, false)
+}
+
+// ContainsNoConditions returns true if none of the required conditions are present in the actual conditions.
+func ContainsNoConditions(requiredConditions, actualConditions []metav1.Condition) bool {
+	return ContainsConditionsDecorator(requiredConditions, actualConditions, true)
+}
+
+// CreateConditionsCmpDiff strips the required and actual conditions down to the
+// fields we compare (Type, Status, Reason) and returns a want/got diff of the two slices.
+func CreateConditionsCmpDiff(requiredConditions, actualConditions []metav1.Condition) string {
+	actualConditionsForCmp := []metav1.Condition{}
+	requiredConditionsForCmp := []metav1.Condition{}
+	for _, actualCondition := range actualConditions {
+		actualConditionForCmp := metav1.Condition{
+			Type:   actualCondition.Type,
+			Status: actualCondition.Status,
+			Reason: actualCondition.Reason,
+		}
+		actualConditionsForCmp = append(actualConditionsForCmp, actualConditionForCmp)
+	}
+	for _, requiredCondition := range requiredConditions {
+		requiredConditionForCmp := metav1.Condition{
+			Type:   requiredCondition.Type,
+			Status: requiredCondition.Status,
+			Reason: requiredCondition.Reason,
+		}
+		requiredConditionsForCmp = append(requiredConditionsForCmp, requiredConditionForCmp)
+	}
+
+	return diff.PrintWantGot(cmp.Diff(requiredConditionsForCmp, actualConditionsForCmp))
+}
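A quick illustration of how the two condition helpers are meant to be combined in a spec. `expectAllConditions` is a hypothetical helper for package e2e (it relies on the Ginkgo/Gomega dot-imports); the helper itself is not part of this change.

```go
// expectAllConditions fails the spec when a required condition is missing and,
// before failing, logs a readable want/got diff of the condition slices.
func expectAllConditions(expected, actual []metav1.Condition) {
	if !conditions.ContainsAllConditions(expected, actual) {
		GinkgoWriter.Print(conditions.CreateConditionsCmpDiff(expected, actual))
	}
	Expect(conditions.ContainsAllConditions(expected, actual)).To(BeTrue())
}
```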
diff --git a/src/test/util/resources/resources.go b/src/test/util/resources/resources.go
index 827c005c..1aeb6df2 100644
--- a/src/test/util/resources/resources.go
+++ b/src/test/util/resources/resources.go
@@ -29,12 +29,6 @@ func CreateTestUffizziCluster(name, ns string) *v1alpha1.UffizziCluster {
 	}
 }

-func CreateTestUffizziClusterWithSpec(name, ns string, spec v1alpha1.UffizziClusterSpec) *v1alpha1.UffizziCluster {
-	uc := CreateTestUffizziCluster(name, ns)
-	uc.Spec = spec
-	return uc
-}
-
 func GetHelmReleaseFromUffizziCluster(uc *v1alpha1.UffizziCluster) *v2beta1.HelmRelease {
 	return &v2beta1.HelmRelease{
 		ObjectMeta: metav1.ObjectMeta{
@@ -44,6 +38,15 @@ func GetHelmReleaseFromUffizziCluster(uc *v1alpha1.UffizziCluster) *v2beta1.HelmRelease {
 	}
 }

+func GetETCDHelmReleaseFromUffizziCluster(uc *v1alpha1.UffizziCluster) *v2beta1.HelmRelease {
+	return &v2beta1.HelmRelease{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "uc-etcd-" + uc.Name,
+			Namespace: uc.Namespace,
+		},
+	}
+}
+
 func GetHelmRepositoryFromUffizziCluster(uc *v1alpha1.UffizziCluster) *v1beta2.HelmRepository {
 	return &v1beta2.HelmRepository{
 		ObjectMeta: metav1.ObjectMeta{