From 26a6989a34a7f703a090cad834a0ee07202be3d1 Mon Sep 17 00:00:00 2001 From: testisnullus Date: Mon, 12 Feb 2024 15:45:46 +0200 Subject: [PATCH] on-premises provisioning for phase 1 was implemented --- .secrets.baseline | 64 ++++---- Makefile | 2 +- apis/clusters/v1beta1/cadence_types.go | 3 +- apis/clusters/v1beta1/cadence_webhook.go | 39 +---- apis/clusters/v1beta1/cassandra_types.go | 1 - apis/clusters/v1beta1/cassandra_webhook.go | 47 +----- apis/clusters/v1beta1/kafka_types.go | 1 - apis/clusters/v1beta1/kafka_webhook.go | 43 +----- apis/clusters/v1beta1/kafkaconnect_types.go | 7 +- apis/clusters/v1beta1/kafkaconnect_webhook.go | 44 +----- apis/clusters/v1beta1/postgresql_types.go | 1 - apis/clusters/v1beta1/postgresql_webhook.go | 44 +----- apis/clusters/v1beta1/redis_types.go | 3 +- apis/clusters/v1beta1/redis_webhook.go | 41 +----- apis/clusters/v1beta1/validation.go | 11 +- .../clusters/v1beta1/zz_generated.deepcopy.go | 30 ---- .../clusters.instaclustr.com_cadences.yaml | 43 ------ .../clusters.instaclustr.com_cassandras.yaml | 43 ------ ...lusters.instaclustr.com_kafkaconnects.yaml | 43 ------ .../clusters.instaclustr.com_kafkas.yaml | 43 ------ .../clusters.instaclustr.com_postgresqls.yaml | 43 ------ .../bases/clusters.instaclustr.com_redis.yaml | 43 ------ config/default/kustomization.yaml | 2 +- config/rbac/role.yaml | 8 - .../onpremises/clusters_v1beta1_cadence.yaml | 35 ----- .../clusters_v1beta1_cassandra.yaml | 29 +--- .../onpremises/clusters_v1beta1_kafka.yaml | 19 +-- .../clusters_v1beta1_kafkaconnect.yaml | 21 +-- .../clusters_v1beta1_postgresql.yaml | 21 +-- .../onpremises/clusters_v1beta1_redis.yaml | 22 +-- controllers/clusters/cadence_controller.go | 138 +----------------- controllers/clusters/cassandra_controller.go | 89 +---------- controllers/clusters/kafka_controller.go | 98 +------------ .../clusters/kafkaconnect_controller.go | 95 +----------- controllers/clusters/on_premises.go | 14 ++ controllers/clusters/postgresql_controller.go | 
94 +----------- controllers/clusters/redis_controller.go | 91 +----------- pkg/models/validation.go | 2 +- 38 files changed, 137 insertions(+), 1280 deletions(-) delete mode 100644 config/samples/onpremises/clusters_v1beta1_cadence.yaml diff --git a/.secrets.baseline b/.secrets.baseline index 91ddac180..0f3527648 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -165,14 +165,14 @@ "filename": "apis/clusters/v1beta1/cadence_types.go", "hashed_secret": "a242f4a16b957f7ff99eb24e189e94d270d2348b", "is_verified": false, - "line_number": 281 + "line_number": 280 }, { "type": "Secret Keyword", "filename": "apis/clusters/v1beta1/cadence_types.go", "hashed_secret": "a57ce131bd944bdf8ba2f2f93e179dc416ed0315", "is_verified": false, - "line_number": 291 + "line_number": 290 } ], "apis/clusters/v1beta1/cassandra_types.go": [ @@ -181,21 +181,21 @@ "filename": "apis/clusters/v1beta1/cassandra_types.go", "hashed_secret": "331cc743251c3b9504229de4d139c539da121a33", "is_verified": false, - "line_number": 262 + "line_number": 261 }, { "type": "Secret Keyword", "filename": "apis/clusters/v1beta1/cassandra_types.go", "hashed_secret": "0ad8d7005e084d4f028a4277b73c6fab24269c17", "is_verified": false, - "line_number": 348 + "line_number": 347 }, { "type": "Secret Keyword", "filename": "apis/clusters/v1beta1/cassandra_types.go", "hashed_secret": "e0a46b27231f798fe22dc4d5d82b5feeb5dcf085", "is_verified": false, - "line_number": 413 + "line_number": 412 } ], "apis/clusters/v1beta1/cassandra_webhook.go": [ @@ -204,7 +204,7 @@ "filename": "apis/clusters/v1beta1/cassandra_webhook.go", "hashed_secret": "e0a46b27231f798fe22dc4d5d82b5feeb5dcf085", "is_verified": false, - "line_number": 260 + "line_number": 229 } ], "apis/clusters/v1beta1/kafka_types.go": [ @@ -213,14 +213,14 @@ "filename": "apis/clusters/v1beta1/kafka_types.go", "hashed_secret": "964c67cddfe8e6707157152dcf319126502199dc", "is_verified": false, - "line_number": 294 + "line_number": 293 }, { "type": "Secret Keyword", 
"filename": "apis/clusters/v1beta1/kafka_types.go", "hashed_secret": "589a0ad3cc6bc886a00c46a22e5065c48bd8e1b2", "is_verified": false, - "line_number": 440 + "line_number": 439 } ], "apis/clusters/v1beta1/kafkaconnect_types.go": [ @@ -229,84 +229,84 @@ "filename": "apis/clusters/v1beta1/kafkaconnect_types.go", "hashed_secret": "46fe9b29395041087f91b33bd8c5c6177cd42fd1", "is_verified": false, - "line_number": 247 + "line_number": 246 }, { "type": "Secret Keyword", "filename": "apis/clusters/v1beta1/kafkaconnect_types.go", "hashed_secret": "4b3af1508421e2fa591c5b260c36dd06fdd872a5", "is_verified": false, - "line_number": 285 + "line_number": 284 }, { "type": "Secret Keyword", "filename": "apis/clusters/v1beta1/kafkaconnect_types.go", "hashed_secret": "cf45830dd81b7e1a8b5ffbc2d95b112771524117", "is_verified": false, - "line_number": 295 + "line_number": 294 }, { "type": "Secret Keyword", "filename": "apis/clusters/v1beta1/kafkaconnect_types.go", "hashed_secret": "138905ac46675150bf790088ec56b2efc6a64697", "is_verified": false, - "line_number": 306 + "line_number": 305 }, { "type": "Secret Keyword", "filename": "apis/clusters/v1beta1/kafkaconnect_types.go", "hashed_secret": "3948059919ffeee8ecc42149cb386f43d2f06f74", "is_verified": false, - "line_number": 311 + "line_number": 310 }, { "type": "Secret Keyword", "filename": "apis/clusters/v1beta1/kafkaconnect_types.go", "hashed_secret": "87f1180476a944c4162d1af55efedc8f3e3b609c", "is_verified": false, - "line_number": 520 + "line_number": 519 }, { "type": "Secret Keyword", "filename": "apis/clusters/v1beta1/kafkaconnect_types.go", "hashed_secret": "f0f06c9167ce61a586749bb183ac6a3756dd6010", "is_verified": false, - "line_number": 530 + "line_number": 529 }, { "type": "Secret Keyword", "filename": "apis/clusters/v1beta1/kafkaconnect_types.go", "hashed_secret": "2042128e13ef5ede4af44271160c72f64564c632", "is_verified": false, - "line_number": 541 + "line_number": 540 }, { "type": "Secret Keyword", "filename": 
"apis/clusters/v1beta1/kafkaconnect_types.go", "hashed_secret": "82dc9ca8ba09262ce948227aeb5d9db8084eeb5d", "is_verified": false, - "line_number": 546 + "line_number": 545 }, { "type": "Secret Keyword", "filename": "apis/clusters/v1beta1/kafkaconnect_types.go", "hashed_secret": "5f915325aef923cdc945f639f14c2f854b4214d6", "is_verified": false, - "line_number": 570 + "line_number": 569 }, { "type": "Secret Keyword", "filename": "apis/clusters/v1beta1/kafkaconnect_types.go", "hashed_secret": "5ffe533b830f08a0326348a9160afafc8ada44db", "is_verified": false, - "line_number": 603 + "line_number": 602 }, { "type": "Secret Keyword", "filename": "apis/clusters/v1beta1/kafkaconnect_types.go", "hashed_secret": "5baa61e4c9b93f3f0682250b6cf8331b7ee68fd8", "is_verified": false, - "line_number": 608 + "line_number": 607 } ], "apis/clusters/v1beta1/postgresql_types.go": [ @@ -315,21 +315,21 @@ "filename": "apis/clusters/v1beta1/postgresql_types.go", "hashed_secret": "5ffe533b830f08a0326348a9160afafc8ada44db", "is_verified": false, - "line_number": 352 + "line_number": 351 }, { "type": "Secret Keyword", "filename": "apis/clusters/v1beta1/postgresql_types.go", "hashed_secret": "a3d7d4a96d18c8fc5a1cf9c9c01c45b4690b4008", "is_verified": false, - "line_number": 358 + "line_number": 357 }, { "type": "Secret Keyword", "filename": "apis/clusters/v1beta1/postgresql_types.go", "hashed_secret": "a57ce131bd944bdf8ba2f2f93e179dc416ed0315", "is_verified": false, - "line_number": 478 + "line_number": 477 } ], "apis/clusters/v1beta1/redis_types.go": [ @@ -338,21 +338,21 @@ "filename": "apis/clusters/v1beta1/redis_types.go", "hashed_secret": "bc1c5ae5fd4a238d86261f422e62c489de408c22", "is_verified": false, - "line_number": 169 + "line_number": 168 }, { "type": "Secret Keyword", "filename": "apis/clusters/v1beta1/redis_types.go", "hashed_secret": "d62d56668a8c859e768e8250ed2fb690d03cead3", "is_verified": false, - "line_number": 224 + "line_number": 223 }, { "type": "Secret Keyword", "filename": 
"apis/clusters/v1beta1/redis_types.go", "hashed_secret": "d0e8e6fc5dce4d2b452e344ae41900b566ac01d1", "is_verified": false, - "line_number": 269 + "line_number": 268 } ], "apis/clusters/v1beta1/redis_webhook.go": [ @@ -361,7 +361,7 @@ "filename": "apis/clusters/v1beta1/redis_webhook.go", "hashed_secret": "bc1c5ae5fd4a238d86261f422e62c489de408c22", "is_verified": false, - "line_number": 343 + "line_number": 316 } ], "apis/clusters/v1beta1/zookeeper_types.go": [ @@ -386,7 +386,7 @@ "filename": "apis/clusters/v1beta1/zz_generated.deepcopy.go", "hashed_secret": "44e17306b837162269a410204daaa5ecee4ec22c", "is_verified": false, - "line_number": 2223 + "line_number": 2198 } ], "apis/kafkamanagement/v1beta1/kafkauser_types.go": [ @@ -527,14 +527,14 @@ "filename": "controllers/clusters/cadence_controller.go", "hashed_secret": "bcf196cdeea4d7ed8b04dcbbd40111eb5e9abeac", "is_verified": false, - "line_number": 774 + "line_number": 659 }, { "type": "Secret Keyword", "filename": "controllers/clusters/cadence_controller.go", "hashed_secret": "192d703e91a60432ce06bfe26adfd12f5c7b931f", "is_verified": false, - "line_number": 816 + "line_number": 701 } ], "controllers/clusters/datatest/kafka_v1beta1.yaml": [ @@ -570,7 +570,7 @@ "filename": "controllers/clusters/postgresql_controller.go", "hashed_secret": "5ffe533b830f08a0326348a9160afafc8ada44db", "is_verified": false, - "line_number": 1362 + "line_number": 1272 } ], "controllers/clusters/zookeeper_controller_test.go": [ @@ -1126,5 +1126,5 @@ } ] }, - "generated_at": "2024-02-08T13:39:05Z" + "generated_at": "2024-02-12T13:45:29Z" } diff --git a/Makefile b/Makefile index 5f9893459..507a1cd2a 100644 --- a/Makefile +++ b/Makefile @@ -83,7 +83,7 @@ test-webhooks: KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test ./apis/clusters/v1beta1 -coverprofile cover.out .PHONY: test - test: manifests generate fmt vet docker-build-server-stub run-server-stub envtest test-clusters test-clusterresources 
test-kafkamanagement test-users stop-server-stub + test: manifests generate fmt vet docker-build-server-stub run-server-stub envtest test-clusters test-clusterresources test-webhooks test-kafkamanagement test-users stop-server-stub .PHONY: goimports goimports: diff --git a/apis/clusters/v1beta1/cadence_types.go b/apis/clusters/v1beta1/cadence_types.go index 50d80930a..579336a79 100644 --- a/apis/clusters/v1beta1/cadence_types.go +++ b/apis/clusters/v1beta1/cadence_types.go @@ -61,8 +61,7 @@ type BundledOpenSearchSpec struct { // CadenceSpec defines the desired state of Cadence type CadenceSpec struct { - Cluster `json:",inline"` - OnPremisesSpec *OnPremisesSpec `json:"onPremisesSpec,omitempty"` + Cluster `json:",inline"` //+kubebuilder:validation:MinItems:=1 //+kubebuilder:validation:MaxItems:=1 DataCentres []*CadenceDataCentre `json:"dataCentres"` diff --git a/apis/clusters/v1beta1/cadence_webhook.go b/apis/clusters/v1beta1/cadence_webhook.go index f51a1078b..28c682f74 100644 --- a/apis/clusters/v1beta1/cadence_webhook.go +++ b/apis/clusters/v1beta1/cadence_webhook.go @@ -89,27 +89,6 @@ func (cv *cadenceValidator) ValidateCreate(ctx context.Context, obj runtime.Obje return err } - contains, err := ContainsKubeVirtAddon(ctx, cv.Client) - if err != nil { - return err - } - - if c.Spec.OnPremisesSpec != nil && c.Spec.OnPremisesSpec.EnableAutomation { - if !contains { - return models.ErrKubeVirtAddonNotFound - } - err = c.Spec.OnPremisesSpec.ValidateCreation() - if err != nil { - return err - } - if c.Spec.PrivateNetworkCluster { - err = c.Spec.OnPremisesSpec.ValidateSSHGatewayCreation() - if err != nil { - return err - } - } - } - appVersions, err := cv.API.ListAppVersions(models.CadenceAppKind) if err != nil { return fmt.Errorf("cannot list versions for kind: %v, err: %w", @@ -198,22 +177,10 @@ func (cv *cadenceValidator) ValidateCreate(ctx context.Context, obj runtime.Obje return fmt.Errorf("data centres field is empty") } - //TODO: add support of multiple DCs for 
OnPrem clusters - if len(c.Spec.DataCentres) > 1 && c.Spec.OnPremisesSpec != nil { - return fmt.Errorf("on-premises cluster can be provisioned with only one data centre") - } - for _, dc := range c.Spec.DataCentres { - if c.Spec.OnPremisesSpec != nil { - err = dc.DataCentre.ValidateOnPremisesCreation() - if err != nil { - return err - } - } else { - err = dc.DataCentre.ValidateCreation() - if err != nil { - return err - } + err = dc.DataCentre.ValidateCreation() + if err != nil { + return err } if !c.Spec.PrivateNetworkCluster && dc.PrivateLink != nil { diff --git a/apis/clusters/v1beta1/cassandra_types.go b/apis/clusters/v1beta1/cassandra_types.go index c8d366f43..5808ee3f9 100644 --- a/apis/clusters/v1beta1/cassandra_types.go +++ b/apis/clusters/v1beta1/cassandra_types.go @@ -54,7 +54,6 @@ type CassandraSpec struct { GenericClusterSpec `json:",inline"` RestoreFrom *CassandraRestoreFrom `json:"restoreFrom,omitempty"` - OnPremisesSpec *OnPremisesSpec `json:"onPremisesSpec,omitempty"` DataCentres []*CassandraDataCentre `json:"dataCentres,omitempty"` LuceneEnabled bool `json:"luceneEnabled,omitempty"` PasswordAndUserAuth bool `json:"passwordAndUserAuth,omitempty"` diff --git a/apis/clusters/v1beta1/cassandra_webhook.go b/apis/clusters/v1beta1/cassandra_webhook.go index 1cd98551b..e2bbfd447 100644 --- a/apis/clusters/v1beta1/cassandra_webhook.go +++ b/apis/clusters/v1beta1/cassandra_webhook.go @@ -49,8 +49,6 @@ func (r *Cassandra) SetupWebhookWithManager(mgr ctrl.Manager, api validation.Val // TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. 
//+kubebuilder:webhook:path=/mutate-clusters-instaclustr-com-v1beta1-cassandra,mutating=true,failurePolicy=fail,sideEffects=None,groups=clusters.instaclustr.com,resources=cassandras,verbs=create;update,versions=v1beta1,name=mcassandra.kb.io,admissionReviewVersions=v1 //+kubebuilder:webhook:path=/validate-clusters-instaclustr-com-v1beta1-cassandra,mutating=false,failurePolicy=fail,sideEffects=None,groups=clusters.instaclustr.com,resources=cassandras,verbs=create;update,versions=v1beta1,name=vcassandra.kb.io,admissionReviewVersions=v1 -//+kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list;watch -//+kubebuilder:rbac:groups="apps",resources=deployments,verbs=get;list;watch var _ webhook.CustomValidator = &cassandraValidator{} var _ webhook.Defaulter = &Cassandra{} @@ -92,28 +90,6 @@ func (cv *cassandraValidator) ValidateCreate(ctx context.Context, obj runtime.Ob return err } - contains, err := ContainsKubeVirtAddon(ctx, cv.Client) - if err != nil { - return err - } - - if c.Spec.OnPremisesSpec != nil && c.Spec.OnPremisesSpec.EnableAutomation { - if !contains { - return models.ErrKubeVirtAddonNotFound - } - - err = c.Spec.OnPremisesSpec.ValidateCreation() - if err != nil { - return err - } - if c.Spec.PrivateNetwork { - err = c.Spec.OnPremisesSpec.ValidateSSHGatewayCreation() - if err != nil { - return err - } - } - } - appVersions, err := cv.API.ListAppVersions(models.CassandraAppKind) if err != nil { return fmt.Errorf("cannot list versions for kind: %v, err: %w", @@ -129,22 +105,15 @@ func (cv *cassandraValidator) ValidateCreate(ctx context.Context, obj runtime.Ob return fmt.Errorf("data centres field is empty") } - //TODO: add support of multiple DCs for OnPrem clusters - if len(c.Spec.DataCentres) > 1 && c.Spec.OnPremisesSpec != nil { - return fmt.Errorf("on-premises cluster can be provisioned with only one data centre") - } - for _, dc := range c.Spec.DataCentres { - if c.Spec.OnPremisesSpec != nil { - err = 
dc.GenericDataCentreSpec.ValidateOnPremisesCreation() - if err != nil { - return err - } - } else { - err = dc.GenericDataCentreSpec.validateCreation() - if err != nil { - return err - } + //TODO: add support of multiple DCs for OnPrem clusters + if len(c.Spec.DataCentres) > 1 && dc.CloudProvider == models.ONPREMISES { + return models.ErrOnPremicesWithMultiDC + } + + err = dc.GenericDataCentreSpec.validateCreation() + if err != nil { + return err } if !c.Spec.PrivateNetwork && dc.PrivateIPBroadcastForDiscovery { diff --git a/apis/clusters/v1beta1/kafka_types.go b/apis/clusters/v1beta1/kafka_types.go index 6d62c7254..3ca495dff 100644 --- a/apis/clusters/v1beta1/kafka_types.go +++ b/apis/clusters/v1beta1/kafka_types.go @@ -60,7 +60,6 @@ type KarapaceSchemaRegistry struct { type KafkaSpec struct { GenericClusterSpec `json:",inline"` - OnPremisesSpec *OnPremisesSpec `json:"onPremisesSpec,omitempty"` // ReplicationFactor to use for new topic. // Also represents the number of racks to use when allocating nodes. 
ReplicationFactor int `json:"replicationFactor"` diff --git a/apis/clusters/v1beta1/kafka_webhook.go b/apis/clusters/v1beta1/kafka_webhook.go index 3be0652f6..94414d059 100644 --- a/apis/clusters/v1beta1/kafka_webhook.go +++ b/apis/clusters/v1beta1/kafka_webhook.go @@ -85,27 +85,6 @@ func (kv *kafkaValidator) ValidateCreate(ctx context.Context, obj runtime.Object return err } - contains, err := ContainsKubeVirtAddon(ctx, kv.Client) - if err != nil { - return err - } - - if k.Spec.OnPremisesSpec != nil && k.Spec.OnPremisesSpec.EnableAutomation { - if !contains { - return models.ErrKubeVirtAddonNotFound - } - err = k.Spec.OnPremisesSpec.ValidateCreation() - if err != nil { - return err - } - if k.Spec.PrivateNetwork { - err = k.Spec.OnPremisesSpec.ValidateSSHGatewayCreation() - if err != nil { - return err - } - } - } - appVersions, err := kv.API.ListAppVersions(models.KafkaAppKind) if err != nil { return fmt.Errorf("cannot list versions for kind: %v, err: %w", @@ -121,22 +100,14 @@ func (kv *kafkaValidator) ValidateCreate(ctx context.Context, obj runtime.Object return models.ErrZeroDataCentres } - //TODO: add support of multiple DCs for OnPrem clusters - if len(k.Spec.DataCentres) > 1 && k.Spec.OnPremisesSpec != nil { - return fmt.Errorf("on-premises cluster can be provisioned with only one data centre") - } - for _, dc := range k.Spec.DataCentres { - if k.Spec.OnPremisesSpec != nil { - err = dc.GenericDataCentreSpec.ValidateOnPremisesCreation() - if err != nil { - return err - } - } else { - err = dc.GenericDataCentreSpec.validateCreation() - if err != nil { - return err - } + //TODO: add support of multiple DCs for OnPrem clusters + if len(k.Spec.DataCentres) > 1 && dc.CloudProvider == models.ONPREMISES { + return models.ErrOnPremicesWithMultiDC + } + err = dc.GenericDataCentreSpec.validateCreation() + if err != nil { + return err } if len(dc.PrivateLink) > 1 { diff --git a/apis/clusters/v1beta1/kafkaconnect_types.go b/apis/clusters/v1beta1/kafkaconnect_types.go 
index 55e456dba..d4363e15e 100644 --- a/apis/clusters/v1beta1/kafkaconnect_types.go +++ b/apis/clusters/v1beta1/kafkaconnect_types.go @@ -108,10 +108,9 @@ type KafkaConnectDataCentre struct { // KafkaConnectSpec defines the desired state of KafkaConnect type KafkaConnectSpec struct { - Cluster `json:",inline"` - OnPremisesSpec *OnPremisesSpec `json:"onPremisesSpec,omitempty"` - DataCentres []*KafkaConnectDataCentre `json:"dataCentres"` - TargetCluster []*TargetCluster `json:"targetCluster"` + Cluster `json:",inline"` + DataCentres []*KafkaConnectDataCentre `json:"dataCentres"` + TargetCluster []*TargetCluster `json:"targetCluster"` // CustomConnectors defines the location for custom connector storage and access info. CustomConnectors []*CustomConnectors `json:"customConnectors,omitempty"` diff --git a/apis/clusters/v1beta1/kafkaconnect_webhook.go b/apis/clusters/v1beta1/kafkaconnect_webhook.go index 0f550f945..cc188edb0 100644 --- a/apis/clusters/v1beta1/kafkaconnect_webhook.go +++ b/apis/clusters/v1beta1/kafkaconnect_webhook.go @@ -89,27 +89,6 @@ func (kcv *kafkaConnectValidator) ValidateCreate(ctx context.Context, obj runtim return err } - contains, err := ContainsKubeVirtAddon(ctx, kcv.Client) - if err != nil { - return err - } - - if kc.Spec.OnPremisesSpec != nil && kc.Spec.OnPremisesSpec.EnableAutomation { - if !contains { - return models.ErrKubeVirtAddonNotFound - } - err = kc.Spec.OnPremisesSpec.ValidateCreation() - if err != nil { - return err - } - if kc.Spec.PrivateNetworkCluster { - err = kc.Spec.OnPremisesSpec.ValidateSSHGatewayCreation() - if err != nil { - return err - } - } - } - appVersions, err := kcv.API.ListAppVersions(models.KafkaConnectAppKind) if err != nil { return fmt.Errorf("cannot list versions for kind: %v, err: %w", @@ -161,22 +140,15 @@ func (kcv *kafkaConnectValidator) ValidateCreate(ctx context.Context, obj runtim return fmt.Errorf("data centres field is empty") } - //TODO: add support of multiple DCs for OnPrem clusters - if 
len(kc.Spec.DataCentres) > 1 && kc.Spec.OnPremisesSpec != nil { - return fmt.Errorf("on-premises cluster can be provisioned with only one data centre") - } - for _, dc := range kc.Spec.DataCentres { - if kc.Spec.OnPremisesSpec != nil { - err = dc.DataCentre.ValidateOnPremisesCreation() - if err != nil { - return err - } - } else { - err = dc.DataCentre.ValidateCreation() - if err != nil { - return err - } + //TODO: add support of multiple DCs for OnPrem clusters + if len(kc.Spec.DataCentres) > 1 && dc.CloudProvider == models.ONPREMISES { + return models.ErrOnPremicesWithMultiDC + } + + err = dc.DataCentre.ValidateCreation() + if err != nil { + return err } err = validateReplicationFactor(models.KafkaConnectReplicationFactors, dc.ReplicationFactor) diff --git a/apis/clusters/v1beta1/postgresql_types.go b/apis/clusters/v1beta1/postgresql_types.go index 794e79741..1f49b3901 100644 --- a/apis/clusters/v1beta1/postgresql_types.go +++ b/apis/clusters/v1beta1/postgresql_types.go @@ -74,7 +74,6 @@ type PgRestoreFrom struct { type PgSpec struct { PgRestoreFrom *PgRestoreFrom `json:"pgRestoreFrom,omitempty"` Cluster `json:",inline"` - OnPremisesSpec *OnPremisesSpec `json:"onPremisesSpec,omitempty"` DataCentres []*PgDataCentre `json:"dataCentres,omitempty"` ClusterConfigurations map[string]string `json:"clusterConfigurations,omitempty"` SynchronousModeStrict bool `json:"synchronousModeStrict,omitempty"` diff --git a/apis/clusters/v1beta1/postgresql_webhook.go b/apis/clusters/v1beta1/postgresql_webhook.go index 0fdab47ac..11a516799 100644 --- a/apis/clusters/v1beta1/postgresql_webhook.go +++ b/apis/clusters/v1beta1/postgresql_webhook.go @@ -96,27 +96,6 @@ func (pgv *pgValidator) ValidateCreate(ctx context.Context, obj runtime.Object) return err } - contains, err := ContainsKubeVirtAddon(ctx, pgv.K8sClient) - if err != nil { - return err - } - - if pg.Spec.OnPremisesSpec != nil && pg.Spec.OnPremisesSpec.EnableAutomation { - if !contains { - return 
models.ErrKubeVirtAddonNotFound - } - err = pg.Spec.OnPremisesSpec.ValidateCreation() - if err != nil { - return err - } - if pg.Spec.PrivateNetworkCluster { - err = pg.Spec.OnPremisesSpec.ValidateSSHGatewayCreation() - if err != nil { - return err - } - } - } - if pg.Spec.UserRefs != nil { err = pgv.validatePostgreSQLUsers(ctx, pg) if err != nil { @@ -139,22 +118,15 @@ func (pgv *pgValidator) ValidateCreate(ctx context.Context, obj runtime.Object) return models.ErrZeroDataCentres } - //TODO: add support of multiple DCs for OnPrem clusters - if len(pg.Spec.DataCentres) > 1 && pg.Spec.OnPremisesSpec != nil { - return fmt.Errorf("on-premises cluster can be provisioned with only one data centre") - } - for _, dc := range pg.Spec.DataCentres { - if pg.Spec.OnPremisesSpec != nil { - err = dc.DataCentre.ValidateOnPremisesCreation() - if err != nil { - return err - } - } else { - err = dc.DataCentre.ValidateCreation() - if err != nil { - return err - } + //TODO: add support of multiple DCs for OnPrem clusters + if len(pg.Spec.DataCentres) > 1 && dc.CloudProvider == models.ONPREMISES { + return models.ErrOnPremicesWithMultiDC + } + + err = dc.DataCentre.ValidateCreation() + if err != nil { + return err } err = dc.ValidatePGBouncer() diff --git a/apis/clusters/v1beta1/redis_types.go b/apis/clusters/v1beta1/redis_types.go index 0f71faafd..10fba2e8e 100644 --- a/apis/clusters/v1beta1/redis_types.go +++ b/apis/clusters/v1beta1/redis_types.go @@ -67,8 +67,7 @@ type RedisRestoreFrom struct { type RedisSpec struct { GenericClusterSpec `json:",inline"` - RestoreFrom *RedisRestoreFrom `json:"restoreFrom,omitempty"` - OnPremisesSpec *OnPremisesSpec `json:"onPremisesSpec,omitempty"` + RestoreFrom *RedisRestoreFrom `json:"restoreFrom,omitempty"` // Enables client to node encryption ClientEncryption bool `json:"clientEncryption"` diff --git a/apis/clusters/v1beta1/redis_webhook.go b/apis/clusters/v1beta1/redis_webhook.go index 37cf377d4..a82967eb7 100644 --- 
a/apis/clusters/v1beta1/redis_webhook.go +++ b/apis/clusters/v1beta1/redis_webhook.go @@ -103,27 +103,6 @@ func (rv *redisValidator) ValidateCreate(ctx context.Context, obj runtime.Object return err } - contains, err := ContainsKubeVirtAddon(ctx, rv.Client) - if err != nil { - return err - } - - if r.Spec.OnPremisesSpec != nil && r.Spec.OnPremisesSpec.EnableAutomation { - if !contains { - return models.ErrKubeVirtAddonNotFound - } - err = r.Spec.OnPremisesSpec.ValidateCreation() - if err != nil { - return err - } - if r.Spec.PrivateNetwork { - err = r.Spec.OnPremisesSpec.ValidateSSHGatewayCreation() - if err != nil { - return err - } - } - } - err = r.Spec.ValidatePrivateLink() if err != nil { return err @@ -145,23 +124,17 @@ func (rv *redisValidator) ValidateCreate(ctx context.Context, obj runtime.Object } if len(r.Spec.DataCentres) > 1 { - if r.Spec.OnPremisesSpec != nil { - return models.ErrOnPremicesWithMultiDC - } return models.ErrCreateClusterWithMultiDC } for _, dc := range r.Spec.DataCentres { - if r.Spec.OnPremisesSpec != nil { - err = dc.GenericDataCentreSpec.ValidateOnPremisesCreation() - if err != nil { - return err - } - } else { - err = dc.ValidateCreate() - if err != nil { - return err - } + //TODO: add support of multiple DCs for OnPrem clusters + if len(r.Spec.DataCentres) > 1 && dc.CloudProvider == models.ONPREMISES { + return models.ErrOnPremicesWithMultiDC + } + err = dc.ValidateCreate() + if err != nil { + return err } } diff --git a/apis/clusters/v1beta1/validation.go b/apis/clusters/v1beta1/validation.go index 1168455d5..d98058e31 100644 --- a/apis/clusters/v1beta1/validation.go +++ b/apis/clusters/v1beta1/validation.go @@ -61,21 +61,26 @@ func (dc *DataCentre) ValidateCreation() error { } switch dc.CloudProvider { - case "AWS_VPC": + case models.AWSVPC: if !validation.Contains(dc.Region, models.AWSRegions) { return fmt.Errorf("AWS Region: %s is unavailable, available regions: %v", dc.Region, models.AWSRegions) } - case "AZURE_AZ": + case 
models.AZUREAZ: if !validation.Contains(dc.Region, models.AzureRegions) { return fmt.Errorf("azure Region: %s is unavailable, available regions: %v", dc.Region, models.AzureRegions) } - case "GCP": + case models.GCP: if !validation.Contains(dc.Region, models.GCPRegions) { return fmt.Errorf("GCP Region: %s is unavailable, available regions: %v", dc.Region, models.GCPRegions) } + case models.ONPREMISES: + if dc.Region != models.CLIENTDC { + return fmt.Errorf("ONPREMISES Region: %s is unavailable, available regions: %v", + dc.Region, models.CLIENTDC) + } } if dc.ProviderAccountName == models.DefaultAccountName && len(dc.CloudProviderSettings) != 0 { diff --git a/apis/clusters/v1beta1/zz_generated.deepcopy.go b/apis/clusters/v1beta1/zz_generated.deepcopy.go index 0baf832be..eedf2a10e 100644 --- a/apis/clusters/v1beta1/zz_generated.deepcopy.go +++ b/apis/clusters/v1beta1/zz_generated.deepcopy.go @@ -233,11 +233,6 @@ func (in *CadenceList) DeepCopyObject() runtime.Object { func (in *CadenceSpec) DeepCopyInto(out *CadenceSpec) { *out = *in in.Cluster.DeepCopyInto(&out.Cluster) - if in.OnPremisesSpec != nil { - in, out := &in.OnPremisesSpec, &out.OnPremisesSpec - *out = new(OnPremisesSpec) - (*in).DeepCopyInto(*out) - } if in.DataCentres != nil { in, out := &in.DataCentres, &out.DataCentres *out = make([]*CadenceDataCentre, len(*in)) @@ -513,11 +508,6 @@ func (in *CassandraSpec) DeepCopyInto(out *CassandraSpec) { *out = new(CassandraRestoreFrom) (*in).DeepCopyInto(*out) } - if in.OnPremisesSpec != nil { - in, out := &in.OnPremisesSpec, &out.OnPremisesSpec - *out = new(OnPremisesSpec) - (*in).DeepCopyInto(*out) - } if in.DataCentres != nil { in, out := &in.DataCentres, &out.DataCentres *out = make([]*CassandraDataCentre, len(*in)) @@ -1213,11 +1203,6 @@ func (in *KafkaConnectList) DeepCopyObject() runtime.Object { func (in *KafkaConnectSpec) DeepCopyInto(out *KafkaConnectSpec) { *out = *in in.Cluster.DeepCopyInto(&out.Cluster) - if in.OnPremisesSpec != nil { - in, out := 
&in.OnPremisesSpec, &out.OnPremisesSpec - *out = new(OnPremisesSpec) - (*in).DeepCopyInto(*out) - } if in.DataCentres != nil { in, out := &in.DataCentres, &out.DataCentres *out = make([]*KafkaConnectDataCentre, len(*in)) @@ -1380,11 +1365,6 @@ func (in *KafkaList) DeepCopyObject() runtime.Object { func (in *KafkaSpec) DeepCopyInto(out *KafkaSpec) { *out = *in in.GenericClusterSpec.DeepCopyInto(&out.GenericClusterSpec) - if in.OnPremisesSpec != nil { - in, out := &in.OnPremisesSpec, &out.OnPremisesSpec - *out = new(OnPremisesSpec) - (*in).DeepCopyInto(*out) - } if in.UserRefs != nil { in, out := &in.UserRefs, &out.UserRefs *out = make(References, len(*in)) @@ -2154,11 +2134,6 @@ func (in *PgSpec) DeepCopyInto(out *PgSpec) { (*in).DeepCopyInto(*out) } in.Cluster.DeepCopyInto(&out.Cluster) - if in.OnPremisesSpec != nil { - in, out := &in.OnPremisesSpec, &out.OnPremisesSpec - *out = new(OnPremisesSpec) - (*in).DeepCopyInto(*out) - } if in.DataCentres != nil { in, out := &in.DataCentres, &out.DataCentres *out = make([]*PgDataCentre, len(*in)) @@ -2540,11 +2515,6 @@ func (in *RedisSpec) DeepCopyInto(out *RedisSpec) { *out = new(RedisRestoreFrom) (*in).DeepCopyInto(*out) } - if in.OnPremisesSpec != nil { - in, out := &in.OnPremisesSpec, &out.OnPremisesSpec - *out = new(OnPremisesSpec) - (*in).DeepCopyInto(*out) - } if in.DataCentres != nil { in, out := &in.DataCentres, &out.DataCentres *out = make([]*RedisDataCentre, len(*in)) diff --git a/config/crd/bases/clusters.instaclustr.com_cadences.yaml b/config/crd/bases/clusters.instaclustr.com_cadences.yaml index 5f429e4ed..1447943cd 100644 --- a/config/crd/bases/clusters.instaclustr.com_cadences.yaml +++ b/config/crd/bases/clusters.instaclustr.com_cadences.yaml @@ -129,49 +129,6 @@ spec: name: description: Name [ 3 .. 32 ] characters. 
type: string - onPremisesSpec: - properties: - cloudInitScriptRef: - description: ObjectReference is namespaced reference to an object - properties: - name: - type: string - namespace: - type: string - required: - - name - - namespace - type: object - dataDiskSize: - type: string - enableAutomation: - type: boolean - nodeCPU: - format: int64 - type: integer - nodeMemory: - type: string - osDiskSize: - type: string - osImageURL: - type: string - sshGatewayCPU: - format: int64 - type: integer - sshGatewayMemory: - type: string - storageClassName: - type: string - required: - - cloudInitScriptRef - - dataDiskSize - - enableAutomation - - nodeCPU - - nodeMemory - - osDiskSize - - osImageURL - - storageClassName - type: object packagedProvisioning: items: properties: diff --git a/config/crd/bases/clusters.instaclustr.com_cassandras.yaml b/config/crd/bases/clusters.instaclustr.com_cassandras.yaml index 181d29b0a..f4f3c2d2e 100644 --- a/config/crd/bases/clusters.instaclustr.com_cassandras.yaml +++ b/config/crd/bases/clusters.instaclustr.com_cassandras.yaml @@ -157,49 +157,6 @@ spec: name: description: Name [ 3 .. 32 ] characters. 
type: string - onPremisesSpec: - properties: - cloudInitScriptRef: - description: ObjectReference is namespaced reference to an object - properties: - name: - type: string - namespace: - type: string - required: - - name - - namespace - type: object - dataDiskSize: - type: string - enableAutomation: - type: boolean - nodeCPU: - format: int64 - type: integer - nodeMemory: - type: string - osDiskSize: - type: string - osImageURL: - type: string - sshGatewayCPU: - format: int64 - type: integer - sshGatewayMemory: - type: string - storageClassName: - type: string - required: - - cloudInitScriptRef - - dataDiskSize - - enableAutomation - - nodeCPU - - nodeMemory - - osDiskSize - - osImageURL - - storageClassName - type: object passwordAndUserAuth: type: boolean pciCompliance: diff --git a/config/crd/bases/clusters.instaclustr.com_kafkaconnects.yaml b/config/crd/bases/clusters.instaclustr.com_kafkaconnects.yaml index d4f0b79df..69910853b 100644 --- a/config/crd/bases/clusters.instaclustr.com_kafkaconnects.yaml +++ b/config/crd/bases/clusters.instaclustr.com_kafkaconnects.yaml @@ -183,49 +183,6 @@ spec: name: description: Name [ 3 .. 32 ] characters. 
type: string - onPremisesSpec: - properties: - cloudInitScriptRef: - description: ObjectReference is namespaced reference to an object - properties: - name: - type: string - namespace: - type: string - required: - - name - - namespace - type: object - dataDiskSize: - type: string - enableAutomation: - type: boolean - nodeCPU: - format: int64 - type: integer - nodeMemory: - type: string - osDiskSize: - type: string - osImageURL: - type: string - sshGatewayCPU: - format: int64 - type: integer - sshGatewayMemory: - type: string - storageClassName: - type: string - required: - - cloudInitScriptRef - - dataDiskSize - - enableAutomation - - nodeCPU - - nodeMemory - - osDiskSize - - osImageURL - - storageClassName - type: object pciCompliance: description: The PCI compliance standards relate to the security of user data and transactional information. Can only be applied clusters diff --git a/config/crd/bases/clusters.instaclustr.com_kafkas.yaml b/config/crd/bases/clusters.instaclustr.com_kafkas.yaml index 1659e69dd..0665a2faa 100644 --- a/config/crd/bases/clusters.instaclustr.com_kafkas.yaml +++ b/config/crd/bases/clusters.instaclustr.com_kafkas.yaml @@ -170,49 +170,6 @@ spec: name: description: Name [ 3 .. 32 ] characters. 
type: string - onPremisesSpec: - properties: - cloudInitScriptRef: - description: ObjectReference is namespaced reference to an object - properties: - name: - type: string - namespace: - type: string - required: - - name - - namespace - type: object - dataDiskSize: - type: string - enableAutomation: - type: boolean - nodeCPU: - format: int64 - type: integer - nodeMemory: - type: string - osDiskSize: - type: string - osImageURL: - type: string - sshGatewayCPU: - format: int64 - type: integer - sshGatewayMemory: - type: string - storageClassName: - type: string - required: - - cloudInitScriptRef - - dataDiskSize - - enableAutomation - - nodeCPU - - nodeMemory - - osDiskSize - - osImageURL - - storageClassName - type: object partitionsNumber: description: PartitionsNumber number of partitions to use when created new topics. diff --git a/config/crd/bases/clusters.instaclustr.com_postgresqls.yaml b/config/crd/bases/clusters.instaclustr.com_postgresqls.yaml index b34162aa1..e6b0ac9d8 100644 --- a/config/crd/bases/clusters.instaclustr.com_postgresqls.yaml +++ b/config/crd/bases/clusters.instaclustr.com_postgresqls.yaml @@ -153,49 +153,6 @@ spec: name: description: Name [ 3 .. 32 ] characters. 
type: string - onPremisesSpec: - properties: - cloudInitScriptRef: - description: ObjectReference is namespaced reference to an object - properties: - name: - type: string - namespace: - type: string - required: - - name - - namespace - type: object - dataDiskSize: - type: string - enableAutomation: - type: boolean - nodeCPU: - format: int64 - type: integer - nodeMemory: - type: string - osDiskSize: - type: string - osImageURL: - type: string - sshGatewayCPU: - format: int64 - type: integer - sshGatewayMemory: - type: string - storageClassName: - type: string - required: - - cloudInitScriptRef - - dataDiskSize - - enableAutomation - - nodeCPU - - nodeMemory - - osDiskSize - - osImageURL - - storageClassName - type: object pciCompliance: description: The PCI compliance standards relate to the security of user data and transactional information. Can only be applied clusters diff --git a/config/crd/bases/clusters.instaclustr.com_redis.yaml b/config/crd/bases/clusters.instaclustr.com_redis.yaml index fcdfc7ae6..06400ae3b 100644 --- a/config/crd/bases/clusters.instaclustr.com_redis.yaml +++ b/config/crd/bases/clusters.instaclustr.com_redis.yaml @@ -123,49 +123,6 @@ spec: name: description: Name [ 3 .. 32 ] characters. 
type: string - onPremisesSpec: - properties: - cloudInitScriptRef: - description: ObjectReference is namespaced reference to an object - properties: - name: - type: string - namespace: - type: string - required: - - name - - namespace - type: object - dataDiskSize: - type: string - enableAutomation: - type: boolean - nodeCPU: - format: int64 - type: integer - nodeMemory: - type: string - osDiskSize: - type: string - osImageURL: - type: string - sshGatewayCPU: - format: int64 - type: integer - sshGatewayMemory: - type: string - storageClassName: - type: string - required: - - cloudInitScriptRef - - dataDiskSize - - enableAutomation - - nodeCPU - - nodeMemory - - osDiskSize - - osImageURL - - storageClassName - type: object passwordAndUserAuth: description: Enables Password Authentication and User Authorization type: boolean diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml index 6356bde38..47c1851f3 100644 --- a/config/default/kustomization.yaml +++ b/config/default/kustomization.yaml @@ -22,7 +22,7 @@ bases: # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. - ../certmanager # [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. -- ../prometheus +#- ../prometheus patchesStrategicMerge: # Protect the /metrics endpoint by putting it behind auth. 
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index a9be51455..fa82a1e24 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -24,14 +24,6 @@ rules: verbs: - create - patch -- apiGroups: - - "" - resources: - - namespaces - verbs: - - get - - list - - watch - apiGroups: - "" resources: diff --git a/config/samples/onpremises/clusters_v1beta1_cadence.yaml b/config/samples/onpremises/clusters_v1beta1_cadence.yaml deleted file mode 100644 index 5d9df66d8..000000000 --- a/config/samples/onpremises/clusters_v1beta1_cadence.yaml +++ /dev/null @@ -1,35 +0,0 @@ -apiVersion: clusters.instaclustr.com/v1beta1 -kind: Cadence -metadata: - name: cadence-sample -spec: - name: "cadence-test" - version: "1.0.0" - onPremisesSpec: - enableAutomation: true - storageClassName: managed-csi-premium - osDiskSize: 20Gi - dataDiskSize: 200Gi - sshGatewayCPU: 2 - sshGatewayMemory: 4096Mi - nodeCPU: 2 - nodeMemory: 8192Mi - osImageURL: "https://s3.amazonaws.com/debian-bucket/debian-11-generic-amd64-20230601-1398.raw" - cloudInitScriptRef: - namespace: default - name: instaclustr-cloud-init-secret - standardProvisioning: - - targetCassandra: - dependencyCdcId: "9d43ac54-7317-4ce5-859a-e9d0443508a4" - dependencyVpcType: "TARGET_VPC" - privateNetworkCluster: false - dataCentres: - - region: "CLIENT_DC" - network: "10.1.0.0/16" - cloudProvider: "ONPREMISES" - name: "testdc" - nodeSize: "CAD-DEV-OP.4.8-200" - nodesNumber: 2 - clientEncryption: false - slaTier: "NON_PRODUCTION" - useCadenceWebAuth: false \ No newline at end of file diff --git a/config/samples/onpremises/clusters_v1beta1_cassandra.yaml b/config/samples/onpremises/clusters_v1beta1_cassandra.yaml index b7d4e5a99..c9372d910 100644 --- a/config/samples/onpremises/clusters_v1beta1_cassandra.yaml +++ b/config/samples/onpremises/clusters_v1beta1_cassandra.yaml @@ -1,24 +1,11 @@ apiVersion: clusters.instaclustr.com/v1beta1 kind: Cassandra metadata: - name: cassandra-on-prem-cluster + name: cassandra-on-prem-sample 
spec: - name: "danylo-on-prem-cassandra" - version: "4.0.10" - privateNetworkCluster: false - onPremisesSpec: - enableAutomation: true - storageClassName: managed-csi-premium - osDiskSize: 20Gi - dataDiskSize: 200Gi - sshGatewayCPU: 2 - sshGatewayMemory: 4096Mi - nodeCPU: 2 - nodeMemory: 8192Mi - osImageURL: "https://s3.amazonaws.com/debian-bucket/debian-11-generic-amd64-20230601-1398.raw" - cloudInitScriptRef: - namespace: default - name: instaclustr-cloud-init-secret + name: "cassandra-on-prem-sample" + version: "4.1.3" + privateNetwork: true dataCentres: - name: "onPremCassandra" region: "CLIENT_DC" @@ -26,13 +13,11 @@ spec: continuousBackup: false nodesNumber: 2 replicationFactor: 2 - privateIpBroadcastForDiscovery: false + privateIpBroadcastForDiscovery: true network: "192.168.0.0/16" tags: "onprem": "test" - clientToClusterEncryption: false + clientToClusterEncryption: true nodeSize: "CAS-PRD-OP.4.8-200" - pciCompliance: false - luceneEnabled: false # can be enabled only on 3.11.13 version of Cassandra - passwordAndUserAuth: false + passwordAndUserAuth: true slaTier: "NON_PRODUCTION" diff --git a/config/samples/onpremises/clusters_v1beta1_kafka.yaml b/config/samples/onpremises/clusters_v1beta1_kafka.yaml index 9fe0e1754..96760b1d1 100644 --- a/config/samples/onpremises/clusters_v1beta1_kafka.yaml +++ b/config/samples/onpremises/clusters_v1beta1_kafka.yaml @@ -1,9 +1,9 @@ apiVersion: clusters.instaclustr.com/v1beta1 kind: Kafka metadata: - name: danylo-kafka + name: kafka-on-prem-sample spec: - name: "danylo-kafka" + name: "kafka-on-prem-sample" version: "3.3.1" pciCompliance: false replicationFactor: 3 @@ -11,21 +11,8 @@ spec: allowDeleteTopics: true autoCreateTopics: true clientToClusterEncryption: false - privateNetworkCluster: false + privateNetwork: false slaTier: "NON_PRODUCTION" - onPremisesSpec: - enableAutomation: true - storageClassName: managed-csi-premium - osDiskSize: 20Gi - dataDiskSize: 200Gi - sshGatewayCPU: 2 - sshGatewayMemory: 4096Mi - 
nodeCPU: 2 - nodeMemory: 8192Mi - osImageURL: "https://s3.amazonaws.com/debian-bucket/debian-11-generic-amd64-20230601-1398.raw" - cloudInitScriptRef: - namespace: default - name: instaclustr-cloud-init-secret dataCentres: - name: "onPremKafka" nodesNumber: 3 diff --git a/config/samples/onpremises/clusters_v1beta1_kafkaconnect.yaml b/config/samples/onpremises/clusters_v1beta1_kafkaconnect.yaml index 75a47a502..ce75b1586 100644 --- a/config/samples/onpremises/clusters_v1beta1_kafkaconnect.yaml +++ b/config/samples/onpremises/clusters_v1beta1_kafkaconnect.yaml @@ -1,29 +1,16 @@ apiVersion: clusters.instaclustr.com/v1beta1 kind: KafkaConnect metadata: - name: kafkaconnect-sample + name: kafkaconnect-on-prem-sample spec: - name: "kafkaconnect-onprem" + name: "kafkaconnect-on-prem-sample" version: "3.5.1" - onPremisesSpec: - enableAutomation: true - storageClassName: managed-csi-premium - osDiskSize: 20Gi - dataDiskSize: 200Gi - sshGatewayCPU: 2 - sshGatewayMemory: 4096Mi - nodeCPU: 2 - nodeMemory: 8192Mi - osImageURL: "https://s3.amazonaws.com/debian-bucket/debian-11-generic-amd64-20230601-1398.raw" - cloudInitScriptRef: - namespace: default - name: instaclustr-cloud-init-secret privateNetworkCluster: false slaTier: "NON_PRODUCTION" targetCluster: - managedCluster: - - targetKafkaClusterId: "34dfc53c-c8c1-4be8-bd2f-cfdb77ec7349" - kafkaConnectVpcType: "KAFKA_VPC" + - targetKafkaClusterId: "aa019bc1-fc9a-4226-9a95-86417b37b31e" + kafkaConnectVpcType: "SEPARATE_VPC" dataCentres: - name: "kafkaconnect-onprem" nodesNumber: 3 diff --git a/config/samples/onpremises/clusters_v1beta1_postgresql.yaml b/config/samples/onpremises/clusters_v1beta1_postgresql.yaml index 35a3c78fe..b1235aec3 100644 --- a/config/samples/onpremises/clusters_v1beta1_postgresql.yaml +++ b/config/samples/onpremises/clusters_v1beta1_postgresql.yaml @@ -1,28 +1,15 @@ apiVersion: clusters.instaclustr.com/v1beta1 kind: PostgreSQL metadata: - name: postgresql-sample + name: postgresql-on-prem-sample spec: - 
name: "postgresql-sample" + name: "postgresql-on-prem-sample" version: "15.4.0" - onPremisesSpec: - enableAutomation: true - storageClassName: managed-csi-premium - osDiskSize: 20Gi - dataDiskSize: 200Gi - sshGatewayCPU: 2 - sshGatewayMemory: 4096Mi - nodeCPU: 2 - nodeMemory: 8192Mi - osImageURL: "https://s3.amazonaws.com/debian-bucket/debian-11-generic-amd64-20230601-1398.raw" - cloudInitScriptRef: - namespace: default - name: instaclustr-cloud-init-secret dataCentres: - region: "CLIENT_DC" network: "10.1.0.0/16" cloudProvider: "ONPREMISES" - nodeSize: "PGS-DEV-OP.4.8-200" + nodeSize: "PSQL-DEV-OP.4.8-200" nodesNumber: 2 clientEncryption: false name: "testDC1" @@ -31,5 +18,5 @@ spec: interDataCentreReplication: - isPrimaryDataCentre: true slaTier: "NON_PRODUCTION" - privateNetworkCluster: false + privateNetwork: false synchronousModeStrict: false \ No newline at end of file diff --git a/config/samples/onpremises/clusters_v1beta1_redis.yaml b/config/samples/onpremises/clusters_v1beta1_redis.yaml index 7a092423a..f47980982 100644 --- a/config/samples/onpremises/clusters_v1beta1_redis.yaml +++ b/config/samples/onpremises/clusters_v1beta1_redis.yaml @@ -1,33 +1,19 @@ apiVersion: clusters.instaclustr.com/v1beta1 kind: Redis metadata: - name: danylo-redis + name: redis-on-prem-sample spec: - name: "danylo-redis" + name: "redis-on-prem-sample" version: "7.0.14" - onPremisesSpec: - enableAutomation: true - storageClassName: managed-csi-premium - osDiskSize: 20Gi - dataDiskSize: 200Gi - sshGatewayCPU: 2 - sshGatewayMemory: 4096Mi - nodeCPU: 2 - nodeMemory: 8192Mi - osImageURL: "https://s3.amazonaws.com/debian-bucket/debian-11-generic-amd64-20230601-1398.raw" - cloudInitScriptRef: - namespace: default - name: instaclustr-cloud-init-secret slaTier: "NON_PRODUCTION" clientEncryption: false passwordAndUserAuth: true - privateNetworkCluster: false + privateNetwork: false dataCentres: - - region: "CLIENT_DC" + - region: "CLIENT_DC" name: "onPremRedis" cloudProvider: "ONPREMISES"
network: "10.1.0.0/16" nodeSize: "RDS-PRD-OP.8.64-400" masterNodes: 3 - nodesNumber: 0 replicationFactor: 0 \ No newline at end of file diff --git a/controllers/clusters/cadence_controller.go b/controllers/clusters/cadence_controller.go index a1789154d..97c47c561 100644 --- a/controllers/clusters/cadence_controller.go +++ b/controllers/clusters/cadence_controller.go @@ -22,6 +22,12 @@ import ( "fmt" "github.com/go-logr/logr" + "github.com/instaclustr/operator/apis/clusters/v1beta1" + "github.com/instaclustr/operator/pkg/exposeservice" + "github.com/instaclustr/operator/pkg/instaclustr" + "github.com/instaclustr/operator/pkg/models" + rlimiter "github.com/instaclustr/operator/pkg/ratelimiter" + "github.com/instaclustr/operator/pkg/scheduler" k8serrors "k8s.io/apimachinery/pkg/api/errors" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -36,14 +42,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/ratelimiter" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - "github.com/instaclustr/operator/apis/clusters/v1beta1" - "github.com/instaclustr/operator/pkg/exposeservice" - "github.com/instaclustr/operator/pkg/instaclustr" - "github.com/instaclustr/operator/pkg/models" - rlimiter "github.com/instaclustr/operator/pkg/ratelimiter" - "github.com/instaclustr/operator/pkg/scheduler" ) // CadenceReconciler reconciles a Cadence object @@ -250,83 +248,6 @@ func (r *CadenceReconciler) handleCreateCluster( r.EventRecorder.Event(c, models.Normal, models.Created, "Cluster status check job is started") } - if c.Spec.OnPremisesSpec != nil && c.Spec.OnPremisesSpec.EnableAutomation { - iData, err := r.API.GetCadence(c.Status.ID) - if err != nil { - l.Error(err, "Cannot get cluster from the Instaclustr API", - "cluster name", c.Spec.Name, - "data centres", c.Spec.DataCentres, - "cluster ID", c.Status.ID, - ) - r.EventRecorder.Eventf( - c, models.Warning, 
models.FetchFailed, - "Cluster fetch from the Instaclustr API is failed. Reason: %v", - err, - ) - return reconcile.Result{}, err - } - iCadence, err := c.FromInstAPI(iData) - if err != nil { - l.Error( - err, "Cannot convert cluster from the Instaclustr API", - "cluster name", c.Spec.Name, - "cluster ID", c.Status.ID, - ) - r.EventRecorder.Eventf( - c, models.Warning, models.ConversionFailed, - "Cluster convertion from the Instaclustr API to k8s resource is failed. Reason: %v", - err, - ) - return reconcile.Result{}, err - } - - bootstrap := newOnPremisesBootstrap( - r.Client, - c, - r.EventRecorder, - iCadence.Status.ClusterStatus, - c.Spec.OnPremisesSpec, - newExposePorts(c.GetExposePorts()), - c.GetHeadlessPorts(), - c.Spec.PrivateNetworkCluster, - ) - - err = handleCreateOnPremisesClusterResources(ctx, bootstrap) - if err != nil { - l.Error( - err, "Cannot create resources for on-premises cluster", - "cluster spec", c.Spec.OnPremisesSpec, - ) - r.EventRecorder.Eventf( - c, models.Warning, models.CreationFailed, - "Resources creation for on-premises cluster is failed. Reason: %v", - err, - ) - return reconcile.Result{}, err - } - - err = r.startClusterOnPremisesIPsJob(c, bootstrap) - if err != nil { - l.Error(err, "Cannot start on-premises cluster IPs check job", - "cluster ID", c.Status.ID, - ) - - r.EventRecorder.Eventf( - c, models.Warning, models.CreationFailed, - "On-premises cluster IPs check job is failed. 
Reason: %v", - err, - ) - return reconcile.Result{}, err - } - - l.Info( - "On-premises resources have been created", - "cluster name", c.Spec.Name, - "on-premises Spec", c.Spec.OnPremisesSpec, - "cluster ID", c.Status.ID, - ) - return models.ExitReconcile, nil - } return ctrl.Result{}, nil } @@ -491,42 +412,6 @@ func (r *CadenceReconciler) handleDeleteCluster( "Two-Factor Delete is enabled, please confirm cluster deletion via email or phone.") return ctrl.Result{}, nil } - if c.Spec.OnPremisesSpec != nil && c.Spec.OnPremisesSpec.EnableAutomation { - err = deleteOnPremResources(ctx, r.Client, c.Status.ID, c.Namespace) - if err != nil { - l.Error(err, "Cannot delete cluster on-premises resources", - "cluster ID", c.Status.ID) - r.EventRecorder.Eventf(c, models.Warning, models.DeletionFailed, - "Cluster on-premises resources deletion is failed. Reason: %v", err) - return reconcile.Result{}, err - } - - l.Info("Cluster on-premises resources are deleted", - "cluster ID", c.Status.ID) - r.EventRecorder.Eventf(c, models.Normal, models.Deleted, - "Cluster on-premises resources are deleted") - - patch := c.NewPatch() - controllerutil.RemoveFinalizer(c, models.DeletionFinalizer) - - err = r.Patch(ctx, c, patch) - if err != nil { - l.Error(err, "Cannot patch cluster resource", - "cluster name", c.Spec.Name, - "cluster ID", c.Status.ID, - "kind", c.Kind, - "api Version", c.APIVersion, - "namespace", c.Namespace, - "cluster metadata", c.ObjectMeta, - ) - r.EventRecorder.Eventf(c, models.Warning, models.PatchFailed, - "Cluster resource patch is failed. 
Reason: %v", err) - return reconcile.Result{}, err - } - - return reconcile.Result{}, err - } - r.Scheduler.RemoveJob(c.GetJobID(scheduler.OnPremisesIPsChecker)) } l.Info("Cadence cluster is being deleted", @@ -824,17 +709,6 @@ func (r *CadenceReconciler) newCassandraSpec(c *v1beta1.Cadence, latestCassandra }, nil } -func (r *CadenceReconciler) startClusterOnPremisesIPsJob(c *v1beta1.Cadence, b *onPremisesBootstrap) error { - job := newWatchOnPremisesIPsJob(c.Kind, b) - - err := r.Scheduler.ScheduleJob(c.GetJobID(scheduler.OnPremisesIPsChecker), scheduler.ClusterStatusInterval, job) - if err != nil { - return err - } - - return nil -} - func (r *CadenceReconciler) startClusterStatusJob(c *v1beta1.Cadence) error { job := r.newWatchStatusJob(c) diff --git a/controllers/clusters/cassandra_controller.go b/controllers/clusters/cassandra_controller.go index 7a49b94c1..f0af56ded 100644 --- a/controllers/clusters/cassandra_controller.go +++ b/controllers/clusters/cassandra_controller.go @@ -301,74 +301,6 @@ func (r *CassandraReconciler) startClusterJobs(c *v1beta1.Cassandra, l logr.Logg return nil } -func (r *CassandraReconciler) handleOnPremises(c *v1beta1.Cassandra, l logr.Logger) (reconcile.Result, error) { - instaModel, err := r.API.GetCassandra(c.Status.ID) - if err != nil { - l.Error(err, "Cannot get cluster from the Instaclustr API", - "cluster name", c.Spec.Name, - "data centres", c.Spec.DataCentres, - "cluster ID", c.Status.ID, - ) - r.EventRecorder.Eventf( - c, models.Warning, models.FetchFailed, - "Cluster fetch from the Instaclustr API is failed. 
Reason: %v", - err, - ) - return reconcile.Result{}, err - } - - iCassandra := v1beta1.Cassandra{} - iCassandra.FromInstAPI(instaModel) - - bootstrap := newOnPremisesBootstrap( - r.Client, - c, - r.EventRecorder, - iCassandra.Status.ToOnPremises(), - c.Spec.OnPremisesSpec, - newExposePorts(c.GetExposePorts()), - c.GetHeadlessPorts(), - c.Spec.PrivateNetwork, - ) - - err = handleCreateOnPremisesClusterResources(context.Background(), bootstrap) - if err != nil { - l.Error( - err, "Cannot create resources for on-premises cluster", - "cluster spec", c.Spec.OnPremisesSpec, - ) - r.EventRecorder.Eventf( - c, models.Warning, models.CreationFailed, - "Resources creation for on-premises cluster is failed. Reason: %v", - err, - ) - return reconcile.Result{}, err - } - - err = r.startClusterOnPremisesIPsJob(c, bootstrap) - if err != nil { - l.Error(err, "Cannot start on-premises cluster IPs check job", - "cluster ID", c.Status.ID, - ) - - r.EventRecorder.Eventf( - c, models.Warning, models.CreationFailed, - "On-premises cluster IPs check job is failed. 
Reason: %v", - err, - ) - return reconcile.Result{}, err - } - - l.Info( - "On-premises resources have been created", - "cluster name", c.Spec.Name, - "on-premises Spec", c.Spec.OnPremisesSpec, - "cluster ID", c.Status.ID, - ) - - return models.ExitReconcile, nil -} - func (r *CassandraReconciler) handleCreateCluster( ctx context.Context, l logr.Logger, @@ -399,9 +331,6 @@ func (r *CassandraReconciler) handleCreateCluster( return reconcile.Result{}, fmt.Errorf("failed to start cluster jobs, err: %w", err) } - if c.Spec.OnPremisesSpec != nil && c.Spec.OnPremisesSpec.EnableAutomation { - return r.handleOnPremises(c, l) - } } return models.ExitReconcile, nil @@ -588,23 +517,6 @@ func (r *CassandraReconciler) handleDeleteCluster( r.Scheduler.RemoveJob(c.GetJobID(scheduler.BackupsChecker)) r.Scheduler.RemoveJob(c.GetJobID(scheduler.StatusChecker)) - if c.Spec.OnPremisesSpec != nil && c.Spec.OnPremisesSpec.EnableAutomation { - err = deleteOnPremResources(ctx, r.Client, c.Status.ID, c.Namespace) - if err != nil { - l.Error(err, "Cannot delete cluster on-premises resources", - "cluster ID", c.Status.ID) - r.EventRecorder.Eventf(c, models.Warning, models.DeletionFailed, - "Cluster on-premises resources deletion is failed. 
Reason: %v", err) - return reconcile.Result{}, err - } - - l.Info("Cluster on-premises resources are deleted", - "cluster ID", c.Status.ID) - r.EventRecorder.Eventf(c, models.Normal, models.Deleted, - "Cluster on-premises resources are deleted") - r.Scheduler.RemoveJob(c.GetJobID(scheduler.OnPremisesIPsChecker)) - } - l.Info("Deleting cluster backup resources", "cluster ID", c.Status.ID) err = r.deleteBackups(ctx, c.Status.ID, c.Namespace) @@ -716,6 +628,7 @@ func (r *CassandraReconciler) startUsersCreationJob(cluster *v1beta1.Cassandra) return nil } +//nolint:unused,deadcode func (r *CassandraReconciler) startClusterOnPremisesIPsJob(c *v1beta1.Cassandra, b *onPremisesBootstrap) error { job := newWatchOnPremisesIPsJob(c.Kind, b) diff --git a/controllers/clusters/kafka_controller.go b/controllers/clusters/kafka_controller.go index 750915428..38b04ce09 100644 --- a/controllers/clusters/kafka_controller.go +++ b/controllers/clusters/kafka_controller.go @@ -172,76 +172,6 @@ func (r *KafkaReconciler) startJobs(k *v1beta1.Kafka) error { return nil } -func (r *KafkaReconciler) handleOnPremisesCreation(ctx context.Context, k *v1beta1.Kafka, l logr.Logger) error { - instaModel, err := r.API.GetKafka(k.Status.ID) - if err != nil { - l.Error(err, - "Cannot get cluster from the Instaclustr API", - "cluster name", k.Spec.Name, - "data centres", k.Spec.DataCentres, - "cluster ID", k.Status.ID, - ) - r.EventRecorder.Eventf( - k, models.Warning, models.FetchFailed, - "Cluster fetch from the Instaclustr API is failed. 
Reason: %v", - err, - ) - return err - } - - iKafka := v1beta1.Kafka{} - iKafka.FromInstAPI(instaModel) - - bootstrap := newOnPremisesBootstrap( - r.Client, - k, - r.EventRecorder, - iKafka.Status.ToOnPremises(), - k.Spec.OnPremisesSpec, - newExposePorts(k.GetExposePorts()), - k.GetHeadlessPorts(), - k.Spec.PrivateNetwork, - ) - - err = handleCreateOnPremisesClusterResources(ctx, bootstrap) - if err != nil { - l.Error(err, - "Cannot create resources for on-premises cluster", - "cluster spec", k.Spec.OnPremisesSpec, - ) - r.EventRecorder.Eventf( - k, models.Warning, models.CreationFailed, - "Resources creation for on-premises cluster is failed. Reason: %v", - err, - ) - return err - } - - err = r.startClusterOnPremisesIPsJob(k, bootstrap) - if err != nil { - l.Error(err, - "Cannot start on-premises cluster IPs check job", - "cluster ID", k.Status.ID, - ) - - r.EventRecorder.Eventf( - k, models.Warning, models.CreationFailed, - "On-premises cluster IPs check job is failed. Reason: %v", - err, - ) - return err - } - - l.Info( - "On-premises resources have been created", - "cluster name", k.Spec.Name, - "on-premises Spec", k.Spec.OnPremisesSpec, - "cluster ID", k.Status.ID, - ) - - return nil -} - func (r *KafkaReconciler) handleCreateCluster(ctx context.Context, k *v1beta1.Kafka, l logr.Logger) (reconcile.Result, error) { l = l.WithName("Kafka creation Event") @@ -273,16 +203,6 @@ func (r *KafkaReconciler) handleCreateCluster(ctx context.Context, k *v1beta1.Ka ) return reconcile.Result{}, err } - - if k.Spec.OnPremisesSpec != nil && k.Spec.OnPremisesSpec.EnableAutomation { - err = r.handleOnPremisesCreation(ctx, k, l) - if err != nil { - r.EventRecorder.Eventf(k, models.Warning, models.CreationFailed, - "Failed to handle OnPremises cluster creation. 
Reason: %v", err, - ) - return reconcile.Result{}, err - } - } } return models.ExitReconcile, nil @@ -454,23 +374,6 @@ func (r *KafkaReconciler) handleDeleteCluster(ctx context.Context, k *v1beta1.Ka return reconcile.Result{}, err } - if k.Spec.OnPremisesSpec != nil && k.Spec.OnPremisesSpec.EnableAutomation { - err = deleteOnPremResources(ctx, r.Client, k.Status.ID, k.Namespace) - if err != nil { - l.Error(err, "Cannot delete cluster on-premises resources", - "cluster ID", k.Status.ID) - r.EventRecorder.Eventf(k, models.Warning, models.DeletionFailed, - "Cluster on-premises resources deletion is failed. Reason: %v", err) - return reconcile.Result{}, err - } - - l.Info("Cluster on-premises resources are deleted", - "cluster ID", k.Status.ID) - r.EventRecorder.Eventf(k, models.Normal, models.Deleted, - "Cluster on-premises resources are deleted") - r.Scheduler.RemoveJob(k.GetJobID(scheduler.OnPremisesIPsChecker)) - } - r.Scheduler.RemoveJob(k.GetJobID(scheduler.StatusChecker)) r.Scheduler.RemoveJob(k.GetJobID(scheduler.UserCreator)) controllerutil.RemoveFinalizer(k, models.DeletionFinalizer) @@ -509,6 +412,7 @@ func (r *KafkaReconciler) handleDeleteCluster(ctx context.Context, k *v1beta1.Ka return models.ExitReconcile, nil } +//nolint:unused,deadcode func (r *KafkaReconciler) startClusterOnPremisesIPsJob(k *v1beta1.Kafka, b *onPremisesBootstrap) error { job := newWatchOnPremisesIPsJob(k.Kind, b) diff --git a/controllers/clusters/kafkaconnect_controller.go b/controllers/clusters/kafkaconnect_controller.go index 80e29ec27..ee4b2b7f6 100644 --- a/controllers/clusters/kafkaconnect_controller.go +++ b/controllers/clusters/kafkaconnect_controller.go @@ -217,83 +217,6 @@ func (r *KafkaConnectReconciler) handleCreateCluster(ctx context.Context, kc *v1 "Cluster status check job is started", ) } - if kc.Spec.OnPremisesSpec != nil && kc.Spec.OnPremisesSpec.EnableAutomation { - iData, err := r.API.GetKafkaConnect(kc.Status.ID) - if err != nil { - l.Error(err, "Cannot get cluster 
from the Instaclustr API", - "cluster name", kc.Spec.Name, - "data centres", kc.Spec.DataCentres, - "cluster ID", kc.Status.ID, - ) - r.EventRecorder.Eventf( - kc, models.Warning, models.FetchFailed, - "Cluster fetch from the Instaclustr API is failed. Reason: %v", - err, - ) - return reconcile.Result{}, err - } - iKafkaConnect, err := kc.FromInst(iData) - if err != nil { - l.Error( - err, "Cannot convert cluster from the Instaclustr API", - "cluster name", kc.Spec.Name, - "cluster ID", kc.Status.ID, - ) - r.EventRecorder.Eventf( - kc, models.Warning, models.ConversionFailed, - "Cluster convertion from the Instaclustr API to k8s resource is failed. Reason: %v", - err, - ) - return reconcile.Result{}, err - } - - bootstrap := newOnPremisesBootstrap( - r.Client, - kc, - r.EventRecorder, - iKafkaConnect.Status.ClusterStatus, - kc.Spec.OnPremisesSpec, - newExposePorts(kc.GetExposePorts()), - kc.GetHeadlessPorts(), - kc.Spec.PrivateNetworkCluster, - ) - - err = handleCreateOnPremisesClusterResources(ctx, bootstrap) - if err != nil { - l.Error( - err, "Cannot create resources for on-premises cluster", - "cluster spec", kc.Spec.OnPremisesSpec, - ) - r.EventRecorder.Eventf( - kc, models.Warning, models.CreationFailed, - "Resources creation for on-premises cluster is failed. Reason: %v", - err, - ) - return reconcile.Result{}, err - } - - err = r.startClusterOnPremisesIPsJob(kc, bootstrap) - if err != nil { - l.Error(err, "Cannot start on-premises cluster IPs check job", - "cluster ID", kc.Status.ID, - ) - - r.EventRecorder.Eventf( - kc, models.Warning, models.CreationFailed, - "On-premises cluster IPs check job is failed. 
Reason: %v", - err, - ) - return reconcile.Result{}, err - } - - l.Info( - "On-premises resources have been created", - "cluster name", kc.Spec.Name, - "on-premises Spec", kc.Spec.OnPremisesSpec, - "cluster ID", kc.Status.ID, - ) - return models.ExitReconcile, nil - } return models.ExitReconcile, nil } @@ -453,23 +376,6 @@ func (r *KafkaConnectReconciler) handleDeleteCluster(ctx context.Context, kc *v1 return models.ExitReconcile, nil } - - if kc.Spec.OnPremisesSpec != nil && kc.Spec.OnPremisesSpec.EnableAutomation { - err = deleteOnPremResources(ctx, r.Client, kc.Status.ID, kc.Namespace) - if err != nil { - l.Error(err, "Cannot delete cluster on-premises resources", - "cluster ID", kc.Status.ID) - r.EventRecorder.Eventf(kc, models.Warning, models.DeletionFailed, - "Cluster on-premises resources deletion is failed. Reason: %v", err) - return reconcile.Result{}, err - } - - l.Info("Cluster on-premises resources are deleted", - "cluster ID", kc.Status.ID) - r.EventRecorder.Eventf(kc, models.Normal, models.Deleted, - "Cluster on-premises resources are deleted") - r.Scheduler.RemoveJob(kc.GetJobID(scheduler.OnPremisesIPsChecker)) - } } err = deleteDefaultUserSecret(ctx, r.Client, client.ObjectKeyFromObject(kc)) @@ -552,6 +458,7 @@ func (r *KafkaConnectReconciler) createDefaultSecret(ctx context.Context, kc *v1 return nil } +//nolint:unused,deadcode func (r *KafkaConnectReconciler) startClusterOnPremisesIPsJob(k *v1beta1.KafkaConnect, b *onPremisesBootstrap) error { job := newWatchOnPremisesIPsJob(k.Kind, b) diff --git a/controllers/clusters/on_premises.go b/controllers/clusters/on_premises.go index 18fefaf67..22d4bbd72 100644 --- a/controllers/clusters/on_premises.go +++ b/controllers/clusters/on_premises.go @@ -40,6 +40,7 @@ import ( "github.com/instaclustr/operator/pkg/scheduler" ) +//nolint:unused,deadcode type onPremisesBootstrap struct { K8sClient client.Client K8sObject client.Object @@ -51,6 +52,7 @@ type onPremisesBootstrap struct { PrivateNetworkCluster bool } 
+//nolint:unused,deadcode func newOnPremisesBootstrap( k8sClient client.Client, o client.Object, @@ -73,6 +75,7 @@ func newOnPremisesBootstrap( } } +//nolint:unused,deadcode func handleCreateOnPremisesClusterResources(ctx context.Context, b *onPremisesBootstrap) error { if len(b.ClusterStatus.DataCentres) < 1 { return fmt.Errorf("datacenter ID is empty") @@ -93,6 +96,7 @@ func handleCreateOnPremisesClusterResources(ctx context.Context, b *onPremisesBo return nil } +//nolint:unused,deadcode func reconcileSSHGatewayResources(ctx context.Context, b *onPremisesBootstrap) error { gatewayDVSize, err := resource.ParseQuantity(b.OnPremisesSpec.OSDiskSize) if err != nil { @@ -197,6 +201,7 @@ func reconcileSSHGatewayResources(ctx context.Context, b *onPremisesBootstrap) e return nil } +//nolint:unused,deadcode func reconcileNodesResources(ctx context.Context, b *onPremisesBootstrap) error { for i, node := range b.ClusterStatus.DataCentres[0].Nodes { nodeOSDiskSize, err := resource.ParseQuantity(b.OnPremisesSpec.OSDiskSize) @@ -345,6 +350,7 @@ func reconcileNodesResources(ctx context.Context, b *onPremisesBootstrap) error return nil } +//nolint:unused,deadcode func createDV( ctx context.Context, b *onPremisesBootstrap, @@ -377,6 +383,7 @@ func createDV( return dv, nil } +//nolint:unused,deadcode func newDataDiskDV( b *onPremisesBootstrap, name, @@ -423,6 +430,7 @@ func newDataDiskDV( } } +//nolint:unused,deadcode func newVM( ctx context.Context, b *onPremisesBootstrap, @@ -578,6 +586,7 @@ func newVM( return vm, nil } +//nolint:unused,deadcode func newExposeService( b *onPremisesBootstrap, svcName, @@ -609,6 +618,7 @@ func newExposeService( } } +//nolint:unused,deadcode func newHeadlessService( b *onPremisesBootstrap, svcName string, @@ -637,6 +647,7 @@ func newHeadlessService( } } +//nolint:unused,deadcode func deleteOnPremResources(ctx context.Context, K8sClient client.Client, clusterID, ns string) error { vms := &virtcorev1.VirtualMachineList{} err := K8sClient.List(ctx, 
vms, &client.ListOptions{ @@ -741,6 +752,7 @@ func deleteOnPremResources(ctx context.Context, K8sClient client.Client, cluster return nil } +//nolint:unused,deadcode func newExposePorts(sp []k8scorev1.ServicePort) []k8scorev1.ServicePort { var ports []k8scorev1.ServicePort ports = []k8scorev1.ServicePort{{ @@ -758,6 +770,7 @@ func newExposePorts(sp []k8scorev1.ServicePort) []k8scorev1.ServicePort { return ports } +//nolint:unused,deadcode func newWatchOnPremisesIPsJob(kind string, b *onPremisesBootstrap) scheduler.Job { l := log.Log.WithValues("component", fmt.Sprintf("%sOnPremisesIPsCheckerJob", kind)) @@ -895,6 +908,7 @@ func newWatchOnPremisesIPsJob(kind string, b *onPremisesBootstrap) scheduler.Job } } +//nolint:unused,deadcode func newClusterIPService( b *onPremisesBootstrap, svcName, diff --git a/controllers/clusters/postgresql_controller.go b/controllers/clusters/postgresql_controller.go index c992d3b3b..b63956d01 100644 --- a/controllers/clusters/postgresql_controller.go +++ b/controllers/clusters/postgresql_controller.go @@ -258,81 +258,7 @@ func (r *PostgreSQLReconciler) handleCreateCluster( "Cluster status check job is started", ) - if pg.Spec.OnPremisesSpec != nil && pg.Spec.OnPremisesSpec.EnableAutomation { - iData, err := r.API.GetPostgreSQL(pg.Status.ID) - if err != nil { - l.Error(err, "Cannot get cluster from the Instaclustr API", - "cluster name", pg.Spec.Name, - "data centres", pg.Spec.DataCentres, - "cluster ID", pg.Status.ID, - ) - r.EventRecorder.Eventf( - pg, models.Warning, models.FetchFailed, - "Cluster fetch from the Instaclustr API is failed. 
Reason: %v", - err, - ) - return reconcile.Result{}, err - } - iPostgreSQL, err := pg.FromInstAPI(iData) - if err != nil { - l.Error( - err, "Cannot convert cluster from the Instaclustr API", - "cluster name", pg.Spec.Name, - "cluster ID", pg.Status.ID, - ) - r.EventRecorder.Eventf( - pg, models.Warning, models.ConversionFailed, - "Cluster convertion from the Instaclustr API to k8s resource is failed. Reason: %v", - err, - ) - return reconcile.Result{}, err - } - - bootstrap := newOnPremisesBootstrap( - r.Client, - pg, - r.EventRecorder, - iPostgreSQL.Status.ClusterStatus, - pg.Spec.OnPremisesSpec, - newExposePorts(pg.GetExposePorts()), - pg.GetHeadlessPorts(), - pg.Spec.PrivateNetworkCluster, - ) - - err = handleCreateOnPremisesClusterResources(ctx, bootstrap) - if err != nil { - l.Error( - err, "Cannot create resources for on-premises cluster", - "cluster spec", pg.Spec.OnPremisesSpec, - ) - r.EventRecorder.Eventf( - pg, models.Warning, models.CreationFailed, - "Resources creation for on-premises cluster is failed. Reason: %v", - err, - ) - return reconcile.Result{}, err - } - - err = r.startClusterOnPremisesIPsJob(pg, bootstrap) - if err != nil { - l.Error(err, "Cannot start on-premises cluster IPs check job", - "cluster ID", pg.Status.ID, - ) - - r.EventRecorder.Eventf( - pg, models.Warning, models.CreationFailed, - "On-premises cluster IPs check job is failed. 
Reason: %v", - err, - ) - return reconcile.Result{}, err - } - - l.Info( - "On-premises resources have been created", - "cluster name", pg.Spec.Name, - "on-premises Spec", pg.Spec.OnPremisesSpec, - "cluster ID", pg.Status.ID, - ) + if pg.Spec.DataCentres[0].CloudProvider == models.ONPREMISES { return models.ExitReconcile, nil } @@ -590,23 +516,6 @@ func (r *PostgreSQLReconciler) handleDeleteCluster( return models.ExitReconcile, nil } - - if pg.Spec.OnPremisesSpec != nil && pg.Spec.OnPremisesSpec.EnableAutomation { - err = deleteOnPremResources(ctx, r.Client, pg.Status.ID, pg.Namespace) - if err != nil { - l.Error(err, "Cannot delete cluster on-premises resources", - "cluster ID", pg.Status.ID) - r.EventRecorder.Eventf(pg, models.Warning, models.DeletionFailed, - "Cluster on-premises resources deletion is failed. Reason: %v", err) - return reconcile.Result{}, err - } - - l.Info("Cluster on-premises resources are deleted", - "cluster ID", pg.Status.ID) - r.EventRecorder.Eventf(pg, models.Normal, models.Deleted, - "Cluster on-premises resources are deleted") - r.Scheduler.RemoveJob(pg.GetJobID(scheduler.OnPremisesIPsChecker)) - } } l.Info("PostgreSQL cluster is being deleted. 
Deleting PostgreSQL default user secret", @@ -792,6 +701,7 @@ func (r *PostgreSQLReconciler) handleUpdateDefaultUserPassword( return models.ExitReconcile, nil } +//nolint:unused,deadcode func (r *PostgreSQLReconciler) startClusterOnPremisesIPsJob(pg *v1beta1.PostgreSQL, b *onPremisesBootstrap) error { job := newWatchOnPremisesIPsJob(pg.Kind, b) diff --git a/controllers/clusters/redis_controller.go b/controllers/clusters/redis_controller.go index 130bdcc05..2cf17c04c 100644 --- a/controllers/clusters/redis_controller.go +++ b/controllers/clusters/redis_controller.go @@ -247,73 +247,6 @@ func (r *RedisReconciler) startClusterJobs(redis *v1beta1.Redis) error { return nil } -func (r *RedisReconciler) handleOnPremisesCreation(ctx context.Context, redis *v1beta1.Redis, l logr.Logger) error { - instaModel, err := r.API.GetRedis(redis.Status.ID) - if err != nil { - l.Error(err, "Cannot get cluster from the Instaclustr API", - "cluster name", redis.Spec.Name, - "data centres", redis.Spec.DataCentres, - "cluster ID", redis.Status.ID, - ) - r.EventRecorder.Eventf( - redis, models.Warning, models.FetchFailed, - "Cluster fetch from the Instaclustr API is failed. Reason: %v", - err, - ) - return err - } - - iRedis := v1beta1.Redis{} - iRedis.FromInstAPI(instaModel) - - bootstrap := newOnPremisesBootstrap( - r.Client, - redis, - r.EventRecorder, - iRedis.Status.ToOnPremises(), - redis.Spec.OnPremisesSpec, - newExposePorts(redis.GetExposePorts()), - redis.GetHeadlessPorts(), - redis.Spec.PrivateNetwork, - ) - - err = handleCreateOnPremisesClusterResources(ctx, bootstrap) - if err != nil { - l.Error( - err, "Cannot create resources for on-premises cluster", - "cluster spec", redis.Spec.OnPremisesSpec, - ) - r.EventRecorder.Eventf( - redis, models.Warning, models.CreationFailed, - "Resources creation for on-premises cluster is failed. 
Reason: %v", - err, - ) - return err - } - - err = r.startClusterOnPremisesIPsJob(redis, bootstrap) - if err != nil { - l.Error(err, "Cannot start on-premises cluster IPs check job", - "cluster ID", redis.Status.ID, - ) - - r.EventRecorder.Eventf( - redis, models.Warning, models.CreationFailed, - "On-premises cluster IPs check job is failed. Reason: %v", - err, - ) - return err - } - - l.Info("On-premises resources have been created", - "cluster name", redis.Spec.Name, - "on-premises Spec", redis.Spec.OnPremisesSpec, - "cluster ID", redis.Status.ID, - ) - - return nil -} - func (r *RedisReconciler) handleCreateCluster( ctx context.Context, redis *v1beta1.Redis, @@ -343,13 +276,6 @@ func (r *RedisReconciler) handleCreateCluster( if err != nil { return reconcile.Result{}, err } - - if redis.Spec.OnPremisesSpec != nil && redis.Spec.OnPremisesSpec.EnableAutomation { - err = r.handleOnPremisesCreation(ctx, redis, l) - if err != nil { - return reconcile.Result{}, err - } - } } l.Info( @@ -544,22 +470,6 @@ func (r *RedisReconciler) handleDeleteCluster( return models.ExitReconcile, nil } - if redis.Spec.OnPremisesSpec != nil && redis.Spec.OnPremisesSpec.EnableAutomation { - err = deleteOnPremResources(ctx, r.Client, redis.Status.ID, redis.Namespace) - if err != nil { - l.Error(err, "Cannot delete cluster on-premises resources", - "cluster ID", redis.Status.ID) - r.EventRecorder.Eventf(redis, models.Warning, models.DeletionFailed, - "Cluster on-premises resources deletion is failed. 
Reason: %v", err) - return reconcile.Result{}, err - } - - l.Info("Cluster on-premises resources are deleted", - "cluster ID", redis.Status.ID) - r.EventRecorder.Eventf(redis, models.Normal, models.Deleted, - "Cluster on-premises resources are deleted") - r.Scheduler.RemoveJob(redis.GetJobID(scheduler.OnPremisesIPsChecker)) - } } r.Scheduler.RemoveJob(redis.GetJobID(scheduler.StatusChecker)) @@ -641,6 +551,7 @@ func (r *RedisReconciler) handleDeleteCluster( return models.ExitReconcile, nil } +//nolint:unused,deadcode func (r *RedisReconciler) startClusterOnPremisesIPsJob(redis *v1beta1.Redis, b *onPremisesBootstrap) error { job := newWatchOnPremisesIPsJob(redis.Kind, b) diff --git a/pkg/models/validation.go b/pkg/models/validation.go index 410275d55..8dea232c3 100644 --- a/pkg/models/validation.go +++ b/pkg/models/validation.go @@ -21,7 +21,7 @@ var ( KafkaConnectVPCTypes = []string{"KAFKA_VPC", "VPC_PEERED", "SEPARATE_VPC"} PoolModes = []string{"TRANSACTION", "SESSION", "STATEMENT"} ReplicationModes = []string{"ASYNCHRONOUS", "SYNCHRONOUS"} - CloudProviders = []string{"AWS_VPC", "GCP", "AZURE_AZ"} + CloudProviders = []string{"AWS_VPC", "GCP", "AZURE_AZ", "ONPREMISES"} SLATiers = []string{"PRODUCTION", "NON_PRODUCTION"} ClusterNameRegExp = "^[a-zA-Z0-9][a-zA-Z0-9_-]{2,31}$" BundleTypes = []string{"APACHE_ZOOKEEPER", "CADENCE", "CADENCE_GRPC",