diff --git a/apis/clusters/v1beta1/cadence_types.go b/apis/clusters/v1beta1/cadence_types.go
index f3b394f27..ff5edd2b7 100644
--- a/apis/clusters/v1beta1/cadence_types.go
+++ b/apis/clusters/v1beta1/cadence_types.go
@@ -22,9 +22,11 @@ import (
"fmt"
"regexp"
+ k8scorev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/intstr"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/instaclustr/operator/pkg/models"
@@ -62,7 +64,8 @@ type BundledOpenSearchSpec struct {
// CadenceSpec defines the desired state of Cadence
type CadenceSpec struct {
- Cluster `json:",inline"`
+ Cluster `json:",inline"`
+ OnPremisesSpec *OnPremisesSpec `json:"onPremisesSpec,omitempty"`
//+kubebuilder:validation:MinItems:=1
//+kubebuilder:validation:MaxItems:=1
DataCentres []*CadenceDataCentre `json:"dataCentres"`
@@ -793,3 +796,64 @@ func (o *BundledOpenSearchSpec) validate() error {
return nil
}
+
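+// GetExposePorts returns the ServicePorts to expose publicly (TChannel 7933, Web 8088, plus gRPC 7833 when client encryption is enabled); it returns nil for private network clusters.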
+func (c *Cadence) GetExposePorts() []k8scorev1.ServicePort {
+ var exposePorts []k8scorev1.ServicePort
+ if !c.Spec.PrivateNetworkCluster {
+ exposePorts = []k8scorev1.ServicePort{
+ {
+ Name: models.CadenceTChannel,
+ Port: models.Port7933,
+ TargetPort: intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: models.Port7933,
+ },
+ },
+ {
+ Name: models.CadenceWeb,
+ Port: models.Port8088,
+ TargetPort: intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: models.Port8088,
+ },
+ },
+ }
+ if c.Spec.DataCentres[0].ClientEncryption {
+ sslPort := k8scorev1.ServicePort{
+ Name: models.CadenceGRPC,
+ Port: models.Port7833,
+ TargetPort: intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: models.Port7833,
+ },
+ }
+ exposePorts = append(exposePorts, sslPort)
+ }
+ }
+ return exposePorts
+}
+
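+// GetHeadlessPorts returns the ServicePorts for the cluster headless Service (TChannel 7933, plus gRPC 7833 when client encryption is enabled).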
+func (c *Cadence) GetHeadlessPorts() []k8scorev1.ServicePort {
+ headlessPorts := []k8scorev1.ServicePort{
+ {
+ Name: models.CadenceTChannel,
+ Port: models.Port7933,
+ TargetPort: intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: models.Port7933,
+ },
+ },
+ }
+ if c.Spec.DataCentres[0].ClientEncryption {
+ sslPort := k8scorev1.ServicePort{
+ Name: models.CadenceGRPC,
+ Port: models.Port7833,
+ TargetPort: intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: models.Port7833,
+ },
+ }
+ headlessPorts = append(headlessPorts, sslPort)
+ }
+ return headlessPorts
+}
diff --git a/apis/clusters/v1beta1/cadence_webhook.go b/apis/clusters/v1beta1/cadence_webhook.go
index f7fe05572..7684c2eaa 100644
--- a/apis/clusters/v1beta1/cadence_webhook.go
+++ b/apis/clusters/v1beta1/cadence_webhook.go
@@ -81,6 +81,19 @@ func (cv *cadenceValidator) ValidateCreate(ctx context.Context, obj runtime.Obje
return err
}
+ if c.Spec.OnPremisesSpec != nil {
+ err = c.Spec.OnPremisesSpec.ValidateCreation()
+ if err != nil {
+ return err
+ }
+ if c.Spec.PrivateNetworkCluster {
+ err = c.Spec.OnPremisesSpec.ValidateSSHGatewayCreation()
+ if err != nil {
+ return err
+ }
+ }
+ }
+
appVersions, err := cv.API.ListAppVersions(models.CadenceAppKind)
if err != nil {
return fmt.Errorf("cannot list versions for kind: %v, err: %w",
@@ -169,10 +182,22 @@ func (cv *cadenceValidator) ValidateCreate(ctx context.Context, obj runtime.Obje
return fmt.Errorf("data centres field is empty")
}
+ //TODO: add support of multiple DCs for OnPrem clusters
+ if len(c.Spec.DataCentres) > 1 && c.Spec.OnPremisesSpec != nil {
+ return fmt.Errorf("on-premises cluster can be provisioned with only one data centre")
+ }
+
for _, dc := range c.Spec.DataCentres {
- err := dc.DataCentre.ValidateCreation()
- if err != nil {
- return err
+ if c.Spec.OnPremisesSpec != nil {
+ err := dc.DataCentre.ValidateOnPremisesCreation()
+ if err != nil {
+ return err
+ }
+ } else {
+ err := dc.DataCentre.ValidateCreation()
+ if err != nil {
+ return err
+ }
}
if !c.Spec.PrivateNetworkCluster && dc.PrivateLink != nil {
diff --git a/apis/clusters/v1beta1/cassandra_types.go b/apis/clusters/v1beta1/cassandra_types.go
index cd19c9374..ece5da80c 100644
--- a/apis/clusters/v1beta1/cassandra_types.go
+++ b/apis/clusters/v1beta1/cassandra_types.go
@@ -21,7 +21,9 @@ import (
"fmt"
"strconv"
+ k8scorev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/intstr"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -56,6 +58,7 @@ type CassandraRestoreFrom struct {
// CassandraSpec defines the desired state of Cassandra
type CassandraSpec struct {
RestoreFrom *CassandraRestoreFrom `json:"restoreFrom,omitempty"`
+ OnPremisesSpec *OnPremisesSpec `json:"onPremisesSpec,omitempty"`
Cluster `json:",inline"`
DataCentres []*CassandraDataCentre `json:"dataCentres,omitempty"`
LuceneEnabled bool `json:"luceneEnabled,omitempty"`
@@ -522,3 +525,80 @@ func (c *Cassandra) SetClusterID(id string) {
func init() {
SchemeBuilder.Register(&Cassandra{}, &CassandraList{})
}
+
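+// GetExposePorts returns the ServicePorts to expose publicly (inter-node 7000, CQL 9042, JMX 7199, plus SSL 7001 when client-to-cluster encryption is enabled); it returns nil for private network clusters.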
+func (c *Cassandra) GetExposePorts() []k8scorev1.ServicePort {
+ var exposePorts []k8scorev1.ServicePort
+ if !c.Spec.PrivateNetworkCluster {
+ exposePorts = []k8scorev1.ServicePort{
+ {
+ Name: models.CassandraInterNode,
+ Port: models.Port7000,
+ TargetPort: intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: models.Port7000,
+ },
+ },
+ {
+ Name: models.CassandraCQL,
+ Port: models.Port9042,
+ TargetPort: intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: models.Port9042,
+ },
+ },
+ {
+ Name: models.CassandraJMX,
+ Port: models.Port7199,
+ TargetPort: intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: models.Port7199,
+ },
+ },
+ }
+ if c.Spec.DataCentres[0].ClientToClusterEncryption {
+ sslPort := k8scorev1.ServicePort{
+ Name: models.CassandraSSL,
+ Port: models.Port7001,
+ TargetPort: intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: models.Port7001,
+ },
+ }
+ exposePorts = append(exposePorts, sslPort)
+ }
+ }
+ return exposePorts
+}
+
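+// GetHeadlessPorts returns the ServicePorts for the cluster headless Service (inter-node 7000, CQL 9042, plus SSL 7001 when client-to-cluster encryption is enabled).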
+func (c *Cassandra) GetHeadlessPorts() []k8scorev1.ServicePort {
+ headlessPorts := []k8scorev1.ServicePort{
+ {
+ Name: models.CassandraInterNode,
+ Port: models.Port7000,
+ TargetPort: intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: models.Port7000,
+ },
+ },
+ {
+ Name: models.CassandraCQL,
+ Port: models.Port9042,
+ TargetPort: intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: models.Port9042,
+ },
+ },
+ }
+ if c.Spec.DataCentres[0].ClientToClusterEncryption {
+ sslPort := k8scorev1.ServicePort{
+ Name: models.CassandraSSL,
+ Port: models.Port7001,
+ TargetPort: intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: models.Port7001,
+ },
+ }
+ headlessPorts = append(headlessPorts, sslPort)
+ }
+ return headlessPorts
+}
diff --git a/apis/clusters/v1beta1/cassandra_webhook.go b/apis/clusters/v1beta1/cassandra_webhook.go
index 4d4a6fef6..db2998f2f 100644
--- a/apis/clusters/v1beta1/cassandra_webhook.go
+++ b/apis/clusters/v1beta1/cassandra_webhook.go
@@ -91,6 +91,19 @@ func (cv *cassandraValidator) ValidateCreate(ctx context.Context, obj runtime.Ob
return fmt.Errorf("spark should not have more than 1 item")
}
+ if c.Spec.OnPremisesSpec != nil {
+ err = c.Spec.OnPremisesSpec.ValidateCreation()
+ if err != nil {
+ return err
+ }
+ if c.Spec.PrivateNetworkCluster {
+ err = c.Spec.OnPremisesSpec.ValidateSSHGatewayCreation()
+ if err != nil {
+ return err
+ }
+ }
+ }
+
appVersions, err := cv.API.ListAppVersions(models.CassandraAppKind)
if err != nil {
return fmt.Errorf("cannot list versions for kind: %v, err: %w",
@@ -113,10 +126,22 @@ func (cv *cassandraValidator) ValidateCreate(ctx context.Context, obj runtime.Ob
return fmt.Errorf("data centres field is empty")
}
+ //TODO: add support of multiple DCs for OnPrem clusters
+ if len(c.Spec.DataCentres) > 1 && c.Spec.OnPremisesSpec != nil {
+ return fmt.Errorf("on-premises cluster can be provisioned with only one data centre")
+ }
+
for _, dc := range c.Spec.DataCentres {
- err := dc.DataCentre.ValidateCreation()
- if err != nil {
- return err
+ if c.Spec.OnPremisesSpec != nil {
+ err := dc.DataCentre.ValidateOnPremisesCreation()
+ if err != nil {
+ return err
+ }
+ } else {
+ err := dc.DataCentre.ValidateCreation()
+ if err != nil {
+ return err
+ }
}
if !c.Spec.PrivateNetworkCluster && dc.PrivateIPBroadcastForDiscovery {
diff --git a/apis/clusters/v1beta1/kafka_types.go b/apis/clusters/v1beta1/kafka_types.go
index d4c51261c..2af6c17e1 100644
--- a/apis/clusters/v1beta1/kafka_types.go
+++ b/apis/clusters/v1beta1/kafka_types.go
@@ -19,7 +19,9 @@ package v1beta1
import (
"encoding/json"
+ k8scorev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/intstr"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/instaclustr/operator/pkg/models"
@@ -63,6 +65,7 @@ type KarapaceSchemaRegistry struct {
// KafkaSpec defines the desired state of Kafka
type KafkaSpec struct {
Cluster `json:",inline"`
+ OnPremisesSpec *OnPremisesSpec `json:"onPremisesSpec,omitempty"`
SchemaRegistry []*SchemaRegistry `json:"schemaRegistry,omitempty"`
// ReplicationFactor to use for new topic.
@@ -484,3 +487,72 @@ func (k *Kafka) GetClusterID() string {
func (k *Kafka) SetClusterID(id string) {
k.Status.ID = id
}
+
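+// GetExposePorts returns the ServicePorts to expose publicly (client 9092, control plane 9093, plus broker 9094 when client-to-cluster encryption is enabled); it returns nil for private network clusters.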
+func (k *Kafka) GetExposePorts() []k8scorev1.ServicePort {
+ var exposePorts []k8scorev1.ServicePort
+ if !k.Spec.PrivateNetworkCluster {
+ exposePorts = []k8scorev1.ServicePort{
+ {
+ Name: models.KafkaClient,
+ Port: models.Port9092,
+ TargetPort: intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: models.Port9092,
+ },
+ },
+ {
+ Name: models.KafkaControlPlane,
+ Port: models.Port9093,
+ TargetPort: intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: models.Port9093,
+ },
+ },
+ }
+ if k.Spec.ClientToClusterEncryption {
+ sslPort := k8scorev1.ServicePort{
+ Name: models.KafkaBroker,
+ Port: models.Port9094,
+ TargetPort: intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: models.Port9094,
+ },
+ }
+ exposePorts = append(exposePorts, sslPort)
+ }
+ }
+ return exposePorts
+}
+
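+// GetHeadlessPorts returns the ServicePorts for the cluster headless Service (client 9092, control plane 9093, plus broker 9094 when client-to-cluster encryption is enabled).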
+func (k *Kafka) GetHeadlessPorts() []k8scorev1.ServicePort {
+ headlessPorts := []k8scorev1.ServicePort{
+ {
+ Name: models.KafkaClient,
+ Port: models.Port9092,
+ TargetPort: intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: models.Port9092,
+ },
+ },
+ {
+ Name: models.KafkaControlPlane,
+ Port: models.Port9093,
+ TargetPort: intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: models.Port9093,
+ },
+ },
+ }
+ if k.Spec.ClientToClusterEncryption {
+ kafkaBrokerPort := k8scorev1.ServicePort{
+ Name: models.KafkaBroker,
+ Port: models.Port9094,
+ TargetPort: intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: models.Port9094,
+ },
+ }
+ headlessPorts = append(headlessPorts, kafkaBrokerPort)
+ }
+ return headlessPorts
+}
diff --git a/apis/clusters/v1beta1/kafka_webhook.go b/apis/clusters/v1beta1/kafka_webhook.go
index 47971e2ab..7c1d65c4e 100644
--- a/apis/clusters/v1beta1/kafka_webhook.go
+++ b/apis/clusters/v1beta1/kafka_webhook.go
@@ -81,6 +81,19 @@ func (kv *kafkaValidator) ValidateCreate(ctx context.Context, obj runtime.Object
return err
}
+ if k.Spec.OnPremisesSpec != nil {
+ err = k.Spec.OnPremisesSpec.ValidateCreation()
+ if err != nil {
+ return err
+ }
+ if k.Spec.PrivateNetworkCluster {
+ err = k.Spec.OnPremisesSpec.ValidateSSHGatewayCreation()
+ if err != nil {
+ return err
+ }
+ }
+ }
+
appVersions, err := kv.API.ListAppVersions(models.KafkaAppKind)
if err != nil {
return fmt.Errorf("cannot list versions for kind: %v, err: %w",
@@ -96,10 +109,22 @@ func (kv *kafkaValidator) ValidateCreate(ctx context.Context, obj runtime.Object
return models.ErrZeroDataCentres
}
+ //TODO: add support of multiple DCs for OnPrem clusters
+ if len(k.Spec.DataCentres) > 1 && k.Spec.OnPremisesSpec != nil {
+ return fmt.Errorf("on-premises cluster can be provisioned with only one data centre")
+ }
+
for _, dc := range k.Spec.DataCentres {
- err := dc.DataCentre.ValidateCreation()
- if err != nil {
- return err
+ if k.Spec.OnPremisesSpec != nil {
+ err := dc.DataCentre.ValidateOnPremisesCreation()
+ if err != nil {
+ return err
+ }
+ } else {
+ err := dc.DataCentre.ValidateCreation()
+ if err != nil {
+ return err
+ }
}
if len(dc.PrivateLink) > 1 {
diff --git a/apis/clusters/v1beta1/kafkaconnect_types.go b/apis/clusters/v1beta1/kafkaconnect_types.go
index a101ee454..9c2e99f22 100644
--- a/apis/clusters/v1beta1/kafkaconnect_types.go
+++ b/apis/clusters/v1beta1/kafkaconnect_types.go
@@ -20,8 +20,10 @@ import (
"encoding/json"
"fmt"
+ k8scorev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/intstr"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/instaclustr/operator/pkg/models"
@@ -105,9 +107,10 @@ type KafkaConnectDataCentre struct {
// KafkaConnectSpec defines the desired state of KafkaConnect
type KafkaConnectSpec struct {
- Cluster `json:",inline"`
- DataCentres []*KafkaConnectDataCentre `json:"dataCentres"`
- TargetCluster []*TargetCluster `json:"targetCluster"`
+ Cluster `json:",inline"`
+ OnPremisesSpec *OnPremisesSpec `json:"onPremisesSpec,omitempty"`
+ DataCentres []*KafkaConnectDataCentre `json:"dataCentres"`
+ TargetCluster []*TargetCluster `json:"targetCluster"`
// CustomConnectors defines the location for custom connector storage and access info.
CustomConnectors []*CustomConnectors `json:"customConnectors,omitempty"`
@@ -720,3 +723,34 @@ func (k *KafkaConnect) NewDefaultUserSecret(username, password string) *v1.Secre
},
}
}
+
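+// GetExposePorts returns the ServicePorts to expose publicly (Kafka Connect API 8083); it returns nil for private network clusters.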
+func (k *KafkaConnect) GetExposePorts() []k8scorev1.ServicePort {
+ var exposePorts []k8scorev1.ServicePort
+ if !k.Spec.PrivateNetworkCluster {
+ exposePorts = []k8scorev1.ServicePort{
+ {
+ Name: models.KafkaConnectAPI,
+ Port: models.Port8083,
+ TargetPort: intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: models.Port8083,
+ },
+ },
+ }
+ }
+ return exposePorts
+}
+
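+// GetHeadlessPorts returns the ServicePorts for the cluster headless Service (Kafka Connect API 8083).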
+func (k *KafkaConnect) GetHeadlessPorts() []k8scorev1.ServicePort {
+ headlessPorts := []k8scorev1.ServicePort{
+ {
+ Name: models.KafkaConnectAPI,
+ Port: models.Port8083,
+ TargetPort: intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: models.Port8083,
+ },
+ },
+ }
+ return headlessPorts
+}
diff --git a/apis/clusters/v1beta1/kafkaconnect_webhook.go b/apis/clusters/v1beta1/kafkaconnect_webhook.go
index c239c54e9..eae339c07 100644
--- a/apis/clusters/v1beta1/kafkaconnect_webhook.go
+++ b/apis/clusters/v1beta1/kafkaconnect_webhook.go
@@ -82,6 +82,19 @@ func (kcv *kafkaConnectValidator) ValidateCreate(ctx context.Context, obj runtim
return err
}
+ if kc.Spec.OnPremisesSpec != nil {
+ err = kc.Spec.OnPremisesSpec.ValidateCreation()
+ if err != nil {
+ return err
+ }
+ if kc.Spec.PrivateNetworkCluster {
+ err = kc.Spec.OnPremisesSpec.ValidateSSHGatewayCreation()
+ if err != nil {
+ return err
+ }
+ }
+ }
+
appVersions, err := kcv.API.ListAppVersions(models.KafkaConnectAppKind)
if err != nil {
return fmt.Errorf("cannot list versions for kind: %v, err: %w",
@@ -93,10 +106,6 @@ func (kcv *kafkaConnectValidator) ValidateCreate(ctx context.Context, obj runtim
return err
}
- if len(kc.Spec.DataCentres) == 0 {
- return fmt.Errorf("data centres field is empty")
- }
-
if len(kc.Spec.TargetCluster) > 1 {
return fmt.Errorf("targetCluster array size must be between 0 and 1")
}
@@ -126,10 +135,26 @@ func (kcv *kafkaConnectValidator) ValidateCreate(ctx context.Context, obj runtim
return fmt.Errorf("customConnectors array size must be between 0 and 1")
}
+ if len(kc.Spec.DataCentres) == 0 {
+ return fmt.Errorf("data centres field is empty")
+ }
+
+ //TODO: add support of multiple DCs for OnPrem clusters
+ if len(kc.Spec.DataCentres) > 1 && kc.Spec.OnPremisesSpec != nil {
+ return fmt.Errorf("on-premises cluster can be provisioned with only one data centre")
+ }
+
for _, dc := range kc.Spec.DataCentres {
- err := dc.ValidateCreation()
- if err != nil {
- return err
+ if kc.Spec.OnPremisesSpec != nil {
+ err := dc.DataCentre.ValidateOnPremisesCreation()
+ if err != nil {
+ return err
+ }
+ } else {
+ err := dc.DataCentre.ValidateCreation()
+ if err != nil {
+ return err
+ }
}
err = validateReplicationFactor(models.KafkaConnectReplicationFactors, dc.ReplicationFactor)
diff --git a/apis/clusters/v1beta1/postgresql_types.go b/apis/clusters/v1beta1/postgresql_types.go
index bde017933..4aae8eb93 100644
--- a/apis/clusters/v1beta1/postgresql_types.go
+++ b/apis/clusters/v1beta1/postgresql_types.go
@@ -24,9 +24,11 @@ import (
"unicode"
k8sCore "k8s.io/api/core/v1"
+ k8scorev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/intstr"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -75,6 +77,7 @@ type PgRestoreFrom struct {
type PgSpec struct {
PgRestoreFrom *PgRestoreFrom `json:"pgRestoreFrom,omitempty"`
Cluster `json:",inline"`
+ OnPremisesSpec *OnPremisesSpec `json:"onPremisesSpec,omitempty"`
DataCentres []*PgDataCentre `json:"dataCentres,omitempty"`
ClusterConfigurations map[string]string `json:"clusterConfigurations,omitempty"`
SynchronousModeStrict bool `json:"synchronousModeStrict,omitempty"`
@@ -645,3 +648,34 @@ func GetDefaultPgUserSecret(
return userSecret, nil
}
+
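+// GetExposePorts returns the ServicePorts to expose publicly (PostgreSQL 5432); it returns nil for private network clusters.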
+func (pg *PostgreSQL) GetExposePorts() []k8scorev1.ServicePort {
+ var exposePorts []k8scorev1.ServicePort
+ if !pg.Spec.PrivateNetworkCluster {
+ exposePorts = []k8scorev1.ServicePort{
+ {
+ Name: models.PostgreSQLDB,
+ Port: models.Port5432,
+ TargetPort: intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: models.Port5432,
+ },
+ },
+ }
+ }
+ return exposePorts
+}
+
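+// GetHeadlessPorts returns the ServicePorts for the cluster headless Service (PostgreSQL 5432).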
+func (pg *PostgreSQL) GetHeadlessPorts() []k8scorev1.ServicePort {
+ headlessPorts := []k8scorev1.ServicePort{
+ {
+ Name: models.PostgreSQLDB,
+ Port: models.Port5432,
+ TargetPort: intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: models.Port5432,
+ },
+ },
+ }
+ return headlessPorts
+}
diff --git a/apis/clusters/v1beta1/postgresql_webhook.go b/apis/clusters/v1beta1/postgresql_webhook.go
index 51f567063..8dd560073 100644
--- a/apis/clusters/v1beta1/postgresql_webhook.go
+++ b/apis/clusters/v1beta1/postgresql_webhook.go
@@ -91,6 +91,19 @@ func (pgv *pgValidator) ValidateCreate(ctx context.Context, obj runtime.Object)
return err
}
+ if pg.Spec.OnPremisesSpec != nil {
+ err = pg.Spec.OnPremisesSpec.ValidateCreation()
+ if err != nil {
+ return err
+ }
+ if pg.Spec.PrivateNetworkCluster {
+ err = pg.Spec.OnPremisesSpec.ValidateSSHGatewayCreation()
+ if err != nil {
+ return err
+ }
+ }
+ }
+
if pg.Spec.UserRefs != nil {
err = pgv.validatePostgreSQLUsers(ctx, pg)
if err != nil {
@@ -113,10 +126,22 @@ func (pgv *pgValidator) ValidateCreate(ctx context.Context, obj runtime.Object)
return models.ErrZeroDataCentres
}
+ //TODO: add support of multiple DCs for OnPrem clusters
+ if len(pg.Spec.DataCentres) > 1 && pg.Spec.OnPremisesSpec != nil {
+ return fmt.Errorf("on-premises cluster can be provisioned with only one data centre")
+ }
+
for _, dc := range pg.Spec.DataCentres {
- err = dc.DataCentre.ValidateCreation()
- if err != nil {
- return err
+ if pg.Spec.OnPremisesSpec != nil {
+ err := dc.DataCentre.ValidateOnPremisesCreation()
+ if err != nil {
+ return err
+ }
+ } else {
+ err := dc.DataCentre.ValidateCreation()
+ if err != nil {
+ return err
+ }
}
err = dc.ValidatePGBouncer()
diff --git a/apis/clusters/v1beta1/redis_types.go b/apis/clusters/v1beta1/redis_types.go
index 02c1df4b4..34c5d9624 100644
--- a/apis/clusters/v1beta1/redis_types.go
+++ b/apis/clusters/v1beta1/redis_types.go
@@ -21,7 +21,9 @@ import (
"fmt"
"strconv"
+ k8scorev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/intstr"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -60,8 +62,9 @@ type RedisRestoreFrom struct {
// RedisSpec defines the desired state of Redis
type RedisSpec struct {
- RestoreFrom *RedisRestoreFrom `json:"restoreFrom,omitempty"`
- Cluster `json:",inline"`
+ RestoreFrom *RedisRestoreFrom `json:"restoreFrom,omitempty"`
+ Cluster `json:",inline"`
+ OnPremisesSpec *OnPremisesSpec `json:"onPremisesSpec,omitempty"`
// Enables client to node encryption
ClientEncryption bool `json:"clientEncryption,omitempty"`
@@ -492,3 +495,50 @@ func (r *Redis) SetClusterID(id string) {
func init() {
SchemeBuilder.Register(&Redis{}, &RedisList{})
}
+
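+// GetExposePorts returns the ServicePorts to expose publicly (Redis 6379 and cluster bus 16379); it returns nil for private network clusters.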
+func (r *Redis) GetExposePorts() []k8scorev1.ServicePort {
+ var exposePorts []k8scorev1.ServicePort
+ if !r.Spec.PrivateNetworkCluster {
+ exposePorts = []k8scorev1.ServicePort{
+ {
+ Name: models.RedisDB,
+ Port: models.Port6379,
+ TargetPort: intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: models.Port6379,
+ },
+ },
+ {
+ Name: models.RedisBus,
+ Port: models.Port16379,
+ TargetPort: intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: models.Port16379,
+ },
+ },
+ }
+ }
+ return exposePorts
+}
+
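+// GetHeadlessPorts returns the ServicePorts for the cluster headless Service (Redis 6379 and cluster bus 16379).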
+func (r *Redis) GetHeadlessPorts() []k8scorev1.ServicePort {
+ headlessPorts := []k8scorev1.ServicePort{
+ {
+ Name: models.RedisDB,
+ Port: models.Port6379,
+ TargetPort: intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: models.Port6379,
+ },
+ },
+ {
+ Name: models.RedisBus,
+ Port: models.Port16379,
+ TargetPort: intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: models.Port16379,
+ },
+ },
+ }
+ return headlessPorts
+}
diff --git a/apis/clusters/v1beta1/redis_webhook.go b/apis/clusters/v1beta1/redis_webhook.go
index 09c48b48e..eed414de2 100644
--- a/apis/clusters/v1beta1/redis_webhook.go
+++ b/apis/clusters/v1beta1/redis_webhook.go
@@ -92,6 +92,19 @@ func (rv *redisValidator) ValidateCreate(ctx context.Context, obj runtime.Object
return err
}
+ if r.Spec.OnPremisesSpec != nil {
+ err = r.Spec.OnPremisesSpec.ValidateCreation()
+ if err != nil {
+ return err
+ }
+ if r.Spec.PrivateNetworkCluster {
+ err = r.Spec.OnPremisesSpec.ValidateSSHGatewayCreation()
+ if err != nil {
+ return err
+ }
+ }
+ }
+
err = r.Spec.ValidatePrivateLink()
if err != nil {
return err
@@ -112,10 +125,21 @@ func (rv *redisValidator) ValidateCreate(ctx context.Context, obj runtime.Object
return fmt.Errorf("data centres field is empty")
}
+ if len(r.Spec.DataCentres) > 1 && r.Spec.OnPremisesSpec != nil {
+ return fmt.Errorf("on-premises cluster can be provisioned with only one data centre")
+ }
+
for _, dc := range r.Spec.DataCentres {
- err = dc.ValidateCreate()
- if err != nil {
- return err
+ if r.Spec.OnPremisesSpec != nil {
+ err := dc.DataCentre.ValidateOnPremisesCreation()
+ if err != nil {
+ return err
+ }
+ } else {
+ err := dc.DataCentre.ValidateCreation()
+ if err != nil {
+ return err
+ }
}
if !r.Spec.PrivateNetworkCluster && dc.PrivateLink != nil {
diff --git a/apis/clusters/v1beta1/structs.go b/apis/clusters/v1beta1/structs.go
index f8444df55..dbc230a38 100644
--- a/apis/clusters/v1beta1/structs.go
+++ b/apis/clusters/v1beta1/structs.go
@@ -121,6 +121,18 @@ type ClusteredMaintenanceEvent struct {
Upcoming []*clusterresource.MaintenanceEventStatus `json:"upcoming"`
}
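+// OnPremisesSpec holds the settings used to provision on-premises cluster nodes: storage class and disk sizes, node and SSH gateway CPU/memory, the OS image URL, and a reference to the cloud-init script resource.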
+type OnPremisesSpec struct {
+ StorageClassName string `json:"storageClassName"`
+ OSDiskSize string `json:"osDiskSize"`
+ DataDiskSize string `json:"dataDiskSize"`
+ SSHGatewayCPU int64 `json:"sshGatewayCPU,omitempty"`
+ SSHGatewayMemory string `json:"sshGatewayMemory,omitempty"`
+ NodeCPU int64 `json:"nodeCPU"`
+ NodeMemory string `json:"nodeMemory"`
+ OSImageURL string `json:"osImageURL"`
+ CloudInitScriptRef *Reference `json:"cloudInitScriptRef"`
+}
+
type TwoFactorDelete struct {
// Email address which will be contacted when the cluster is requested to be deleted.
Email string `json:"email"`
diff --git a/apis/clusters/v1beta1/validation.go b/apis/clusters/v1beta1/validation.go
index 6eb8e742f..e172fe321 100644
--- a/apis/clusters/v1beta1/validation.go
+++ b/apis/clusters/v1beta1/validation.go
@@ -94,6 +94,40 @@ func (dc *DataCentre) ValidateCreation() error {
return nil
}
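+// ValidateCreation checks that the on-premises OS disk size, data disk size, and node memory values match the expected storage and memory patterns.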
+func (ops *OnPremisesSpec) ValidateCreation() error {
+ osDiskSizeMatched, err := regexp.Match(models.StorageRegExp, []byte(ops.OSDiskSize))
+ if !osDiskSizeMatched || err != nil {
+ return fmt.Errorf("disk size field for node OS must fit pattern: %s",
+ models.StorageRegExp)
+ }
+
+ dataDiskSizeMatched, err := regexp.Match(models.StorageRegExp, []byte(ops.DataDiskSize))
+ if !dataDiskSizeMatched || err != nil {
+ return fmt.Errorf("disk size field for storring cluster data must fit pattern: %s",
+ models.StorageRegExp)
+ }
+
+ nodeMemoryMatched, err := regexp.Match(models.MemoryRegExp, []byte(ops.NodeMemory))
+ if !nodeMemoryMatched || err != nil {
+ return fmt.Errorf("node memory field must fit pattern: %s",
+ models.MemoryRegExp)
+ }
+
+ return nil
+}
+
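+// ValidateSSHGatewayCreation ensures the SSH gateway CPU and memory fields are set and that the memory value matches the expected memory pattern.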
+func (ops *OnPremisesSpec) ValidateSSHGatewayCreation() error {
+ if ops.SSHGatewayCPU == 0 || ops.SSHGatewayMemory == "" {
+ return fmt.Errorf("fields SSHGatewayCPU and SSHGatewayMemory must not be empty")
+ }
+ sshGatewayMemoryMatched, err := regexp.Match(models.MemoryRegExp, []byte(ops.SSHGatewayMemory))
+ if !sshGatewayMemoryMatched || err != nil {
+ return fmt.Errorf("ssh gateway memory field must fit pattern: %s",
+ models.MemoryRegExp)
+ }
+ return nil
+}
+
func (dc *DataCentre) validateImmutableCloudProviderSettingsUpdate(oldSettings []*CloudProviderSettings) error {
if len(oldSettings) != len(dc.CloudProviderSettings) {
return models.ErrImmutableCloudProviderSettings
@@ -217,3 +251,17 @@ func validateSingleConcurrentResize(concurrentResizes int) error {
return nil
}
+
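+// ValidateOnPremisesCreation checks that an on-premises data centre uses the ONPREMISES cloud provider and the CLIENT_DC region.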
+func (dc *DataCentre) ValidateOnPremisesCreation() error {
+ if dc.CloudProvider != models.ONPREMISES {
+ return fmt.Errorf("cloud provider %s is unavailable for data centre: %s, available value: %s",
+ dc.CloudProvider, dc.Name, models.ONPREMISES)
+ }
+
+ if dc.Region != models.CLIENTDC {
+ return fmt.Errorf("region %s is unavailable for data centre: %s, available value: %s",
+ dc.Region, dc.Name, models.CLIENTDC)
+ }
+
+ return nil
+}
diff --git a/apis/clusters/v1beta1/zz_generated.deepcopy.go b/apis/clusters/v1beta1/zz_generated.deepcopy.go
index a0cc86ca2..f53b2bd4c 100644
--- a/apis/clusters/v1beta1/zz_generated.deepcopy.go
+++ b/apis/clusters/v1beta1/zz_generated.deepcopy.go
@@ -232,6 +232,11 @@ func (in *CadenceList) DeepCopyObject() runtime.Object {
func (in *CadenceSpec) DeepCopyInto(out *CadenceSpec) {
*out = *in
in.Cluster.DeepCopyInto(&out.Cluster)
+ if in.OnPremisesSpec != nil {
+ in, out := &in.OnPremisesSpec, &out.OnPremisesSpec
+ *out = new(OnPremisesSpec)
+ (*in).DeepCopyInto(*out)
+ }
if in.DataCentres != nil {
in, out := &in.DataCentres, &out.DataCentres
*out = make([]*CadenceDataCentre, len(*in))
@@ -457,6 +462,11 @@ func (in *CassandraSpec) DeepCopyInto(out *CassandraSpec) {
*out = new(CassandraRestoreFrom)
(*in).DeepCopyInto(*out)
}
+ if in.OnPremisesSpec != nil {
+ in, out := &in.OnPremisesSpec, &out.OnPremisesSpec
+ *out = new(OnPremisesSpec)
+ (*in).DeepCopyInto(*out)
+ }
in.Cluster.DeepCopyInto(&out.Cluster)
if in.DataCentres != nil {
in, out := &in.DataCentres, &out.DataCentres
@@ -997,6 +1007,11 @@ func (in *KafkaConnectList) DeepCopyObject() runtime.Object {
func (in *KafkaConnectSpec) DeepCopyInto(out *KafkaConnectSpec) {
*out = *in
in.Cluster.DeepCopyInto(&out.Cluster)
+ if in.OnPremisesSpec != nil {
+ in, out := &in.OnPremisesSpec, &out.OnPremisesSpec
+ *out = new(OnPremisesSpec)
+ (*in).DeepCopyInto(*out)
+ }
if in.DataCentres != nil {
in, out := &in.DataCentres, &out.DataCentres
*out = make([]*KafkaConnectDataCentre, len(*in))
@@ -1121,6 +1136,11 @@ func (in *KafkaList) DeepCopyObject() runtime.Object {
func (in *KafkaSpec) DeepCopyInto(out *KafkaSpec) {
*out = *in
in.Cluster.DeepCopyInto(&out.Cluster)
+ if in.OnPremisesSpec != nil {
+ in, out := &in.OnPremisesSpec, &out.OnPremisesSpec
+ *out = new(OnPremisesSpec)
+ (*in).DeepCopyInto(*out)
+ }
if in.SchemaRegistry != nil {
in, out := &in.SchemaRegistry, &out.SchemaRegistry
*out = make([]*SchemaRegistry, len(*in))
@@ -1339,6 +1359,26 @@ func (in *Node) DeepCopy() *Node {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OnPremisesSpec) DeepCopyInto(out *OnPremisesSpec) {
+ *out = *in
+ if in.CloudInitScriptRef != nil {
+ in, out := &in.CloudInitScriptRef, &out.CloudInitScriptRef
+ *out = new(Reference)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnPremisesSpec.
+func (in *OnPremisesSpec) DeepCopy() *OnPremisesSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(OnPremisesSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OpenSearch) DeepCopyInto(out *OpenSearch) {
*out = *in
@@ -1791,6 +1831,11 @@ func (in *PgSpec) DeepCopyInto(out *PgSpec) {
(*in).DeepCopyInto(*out)
}
in.Cluster.DeepCopyInto(&out.Cluster)
+ if in.OnPremisesSpec != nil {
+ in, out := &in.OnPremisesSpec, &out.OnPremisesSpec
+ *out = new(OnPremisesSpec)
+ (*in).DeepCopyInto(*out)
+ }
if in.DataCentres != nil {
in, out := &in.DataCentres, &out.DataCentres
*out = make([]*PgDataCentre, len(*in))
@@ -2099,6 +2144,11 @@ func (in *RedisSpec) DeepCopyInto(out *RedisSpec) {
(*in).DeepCopyInto(*out)
}
in.Cluster.DeepCopyInto(&out.Cluster)
+ if in.OnPremisesSpec != nil {
+ in, out := &in.OnPremisesSpec, &out.OnPremisesSpec
+ *out = new(OnPremisesSpec)
+ (*in).DeepCopyInto(*out)
+ }
if in.DataCentres != nil {
in, out := &in.DataCentres, &out.DataCentres
*out = make([]*RedisDataCentre, len(*in))
diff --git a/config/crd/bases/clusters.instaclustr.com_cadences.yaml b/config/crd/bases/clusters.instaclustr.com_cadences.yaml
index 1fd0ba456..d83957145 100644
--- a/config/crd/bases/clusters.instaclustr.com_cadences.yaml
+++ b/config/crd/bases/clusters.instaclustr.com_cadences.yaml
@@ -125,6 +125,45 @@ spec:
name:
description: Name [ 3 .. 32 ] characters.
type: string
+ onPremisesSpec:
+ properties:
+ cloudInitScriptRef:
+ properties:
+ name:
+ type: string
+ namespace:
+ type: string
+ required:
+ - name
+ - namespace
+ type: object
+ dataDiskSize:
+ type: string
+ nodeCPU:
+ format: int64
+ type: integer
+ nodeMemory:
+ type: string
+ osDiskSize:
+ type: string
+ osImageURL:
+ type: string
+ sshGatewayCPU:
+ format: int64
+ type: integer
+ sshGatewayMemory:
+ type: string
+ storageClassName:
+ type: string
+ required:
+ - cloudInitScriptRef
+ - dataDiskSize
+ - nodeCPU
+ - nodeMemory
+ - osDiskSize
+ - osImageURL
+ - storageClassName
+ type: object
packagedProvisioning:
items:
properties:
diff --git a/config/crd/bases/clusters.instaclustr.com_cassandras.yaml b/config/crd/bases/clusters.instaclustr.com_cassandras.yaml
index b92d77a37..63270db0a 100644
--- a/config/crd/bases/clusters.instaclustr.com_cassandras.yaml
+++ b/config/crd/bases/clusters.instaclustr.com_cassandras.yaml
@@ -109,6 +109,45 @@ spec:
name:
description: Name [ 3 .. 32 ] characters.
type: string
+ onPremisesSpec:
+ properties:
+ cloudInitScriptRef:
+ properties:
+ name:
+ type: string
+ namespace:
+ type: string
+ required:
+ - name
+ - namespace
+ type: object
+ dataDiskSize:
+ type: string
+ nodeCPU:
+ format: int64
+ type: integer
+ nodeMemory:
+ type: string
+ osDiskSize:
+ type: string
+ osImageURL:
+ type: string
+ sshGatewayCPU:
+ format: int64
+ type: integer
+ sshGatewayMemory:
+ type: string
+ storageClassName:
+ type: string
+ required:
+ - cloudInitScriptRef
+ - dataDiskSize
+ - nodeCPU
+ - nodeMemory
+ - osDiskSize
+ - osImageURL
+ - storageClassName
+ type: object
passwordAndUserAuth:
type: boolean
pciCompliance:
diff --git a/config/crd/bases/clusters.instaclustr.com_kafkaconnects.yaml b/config/crd/bases/clusters.instaclustr.com_kafkaconnects.yaml
index 87209b6b0..8269e6d47 100644
--- a/config/crd/bases/clusters.instaclustr.com_kafkaconnects.yaml
+++ b/config/crd/bases/clusters.instaclustr.com_kafkaconnects.yaml
@@ -179,6 +179,45 @@ spec:
name:
description: Name [ 3 .. 32 ] characters.
type: string
+ onPremisesSpec:
+ properties:
+ cloudInitScriptRef:
+ properties:
+ name:
+ type: string
+ namespace:
+ type: string
+ required:
+ - name
+ - namespace
+ type: object
+ dataDiskSize:
+ type: string
+ nodeCPU:
+ format: int64
+ type: integer
+ nodeMemory:
+ type: string
+ osDiskSize:
+ type: string
+ osImageURL:
+ type: string
+ sshGatewayCPU:
+ format: int64
+ type: integer
+ sshGatewayMemory:
+ type: string
+ storageClassName:
+ type: string
+ required:
+ - cloudInitScriptRef
+ - dataDiskSize
+ - nodeCPU
+ - nodeMemory
+ - osDiskSize
+ - osImageURL
+ - storageClassName
+ type: object
pciCompliance:
description: The PCI compliance standards relate to the security of
user data and transactional information. Can only be applied clusters
diff --git a/config/crd/bases/clusters.instaclustr.com_kafkas.yaml b/config/crd/bases/clusters.instaclustr.com_kafkas.yaml
index a6e3de764..4139e9677 100644
--- a/config/crd/bases/clusters.instaclustr.com_kafkas.yaml
+++ b/config/crd/bases/clusters.instaclustr.com_kafkas.yaml
@@ -165,6 +165,45 @@ spec:
name:
description: Name [ 3 .. 32 ] characters.
type: string
+ onPremisesSpec:
+ properties:
+ cloudInitScriptRef:
+ properties:
+ name:
+ type: string
+ namespace:
+ type: string
+ required:
+ - name
+ - namespace
+ type: object
+ dataDiskSize:
+ type: string
+ nodeCPU:
+ format: int64
+ type: integer
+ nodeMemory:
+ type: string
+ osDiskSize:
+ type: string
+ osImageURL:
+ type: string
+ sshGatewayCPU:
+ format: int64
+ type: integer
+ sshGatewayMemory:
+ type: string
+ storageClassName:
+ type: string
+ required:
+ - cloudInitScriptRef
+ - dataDiskSize
+ - nodeCPU
+ - nodeMemory
+ - osDiskSize
+ - osImageURL
+ - storageClassName
+ type: object
partitionsNumber:
description: PartitionsNumber number of partitions to use when created
new topics.
@@ -264,6 +303,18 @@ spec:
status:
description: KafkaStatus defines the observed state of Kafka
properties:
+ availableUsers:
+ items:
+ properties:
+ name:
+ type: string
+ namespace:
+ type: string
+ required:
+ - name
+ - namespace
+ type: object
+ type: array
cdcid:
type: string
currentClusterOperationStatus:
diff --git a/config/crd/bases/clusters.instaclustr.com_postgresqls.yaml b/config/crd/bases/clusters.instaclustr.com_postgresqls.yaml
index 5ec93a122..6bc581752 100644
--- a/config/crd/bases/clusters.instaclustr.com_postgresqls.yaml
+++ b/config/crd/bases/clusters.instaclustr.com_postgresqls.yaml
@@ -132,6 +132,45 @@ spec:
name:
description: Name [ 3 .. 32 ] characters.
type: string
+ onPremisesSpec:
+ properties:
+ cloudInitScriptRef:
+ properties:
+ name:
+ type: string
+ namespace:
+ type: string
+ required:
+ - name
+ - namespace
+ type: object
+ dataDiskSize:
+ type: string
+ nodeCPU:
+ format: int64
+ type: integer
+ nodeMemory:
+ type: string
+ osDiskSize:
+ type: string
+ osImageURL:
+ type: string
+ sshGatewayCPU:
+ format: int64
+ type: integer
+ sshGatewayMemory:
+ type: string
+ storageClassName:
+ type: string
+ required:
+ - cloudInitScriptRef
+ - dataDiskSize
+ - nodeCPU
+ - nodeMemory
+ - osDiskSize
+ - osImageURL
+ - storageClassName
+ type: object
pciCompliance:
description: The PCI compliance standards relate to the security of
user data and transactional information. Can only be applied clusters
diff --git a/config/crd/bases/clusters.instaclustr.com_redis.yaml b/config/crd/bases/clusters.instaclustr.com_redis.yaml
index df4104371..d297d0347 100644
--- a/config/crd/bases/clusters.instaclustr.com_redis.yaml
+++ b/config/crd/bases/clusters.instaclustr.com_redis.yaml
@@ -115,6 +115,45 @@ spec:
name:
description: Name [ 3 .. 32 ] characters.
type: string
+ onPremisesSpec:
+ properties:
+ cloudInitScriptRef:
+ properties:
+ name:
+ type: string
+ namespace:
+ type: string
+ required:
+ - name
+ - namespace
+ type: object
+ dataDiskSize:
+ type: string
+ nodeCPU:
+ format: int64
+ type: integer
+ nodeMemory:
+ type: string
+ osDiskSize:
+ type: string
+ osImageURL:
+ type: string
+ sshGatewayCPU:
+ format: int64
+ type: integer
+ sshGatewayMemory:
+ type: string
+ storageClassName:
+ type: string
+ required:
+ - cloudInitScriptRef
+ - dataDiskSize
+ - nodeCPU
+ - nodeMemory
+ - osDiskSize
+ - osImageURL
+ - storageClassName
+ type: object
passwordAndUserAuth:
type: boolean
pciCompliance:
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
index ee225136e..167097051 100644
--- a/config/rbac/role.yaml
+++ b/config/rbac/role.yaml
@@ -32,6 +32,31 @@ rules:
- get
- list
- watch
+- apiGroups:
+ - ""
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - create
+ - delete
+ - deletecollection
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - pods
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
- apiGroups:
- ""
resources:
@@ -39,6 +64,7 @@ rules:
verbs:
- create
- delete
+ - deletecollection
- get
- list
- patch
@@ -51,6 +77,20 @@ rules:
verbs:
- create
- delete
+ - deletecollection
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - cdi.kubevirt.io
+ resources:
+ - datavolumes
+ verbs:
+ - create
+ - delete
+ - deletecollection
- get
- list
- patch
@@ -796,3 +836,29 @@ rules:
- get
- patch
- update
+- apiGroups:
+ - kubevirt.io
+ resources:
+ - virtualmachineinstances
+ verbs:
+ - create
+ - delete
+ - deletecollection
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - kubevirt.io
+ resources:
+ - virtualmachines
+ verbs:
+ - create
+ - delete
+ - deletecollection
+ - get
+ - list
+ - patch
+ - update
+ - watch
diff --git a/config/samples/clusters_v1beta1_cassandra.yaml b/config/samples/clusters_v1beta1_cassandra.yaml
index b0e1cb1a0..2119a7c3a 100644
--- a/config/samples/clusters_v1beta1_cassandra.yaml
+++ b/config/samples/clusters_v1beta1_cassandra.yaml
@@ -37,20 +37,19 @@ spec:
pciCompliance: false
luceneEnabled: false # can be enabled only on 3.11.13 version of Cassandra
passwordAndUserAuth: true
-# userRefs:
-# - namespace: default
-# name: cassandrauser-sample
-# - namespace: default
-# name: cassandrauser-sample2
-# - namespace: default
-# name: cassandrauser-sample3
+ # userRefs:
+ # - namespace: default
+ # name: cassandrauser-sample
+ # - namespace: default
+ # name: cassandrauser-sample2
+ # - namespace: default
+ # name: cassandrauser-sample3
slaTier: "NON_PRODUCTION"
-# resizeSettings:
-# - notifySupportContacts: false
-# concurrency: 2
-# description: "this is a sample of description"
-# twoFactorDelete:
-# - email: "rostyslp@netapp.com"
- #spark:
- # - version: "2.3.2" # 3.0.1 for 4.0.4 version of Cassandra | 2.3.2 for 3.11.13 version of Cassandra
-
+ # resizeSettings:
+ # - notifySupportContacts: false
+ # concurrency: 2
+ # description: "this is a sample of description"
+ # twoFactorDelete:
+ # - email: "rostyslp@netapp.com"
+ #spark:
+ # - version: "2.3.2" # 3.0.1 for 4.0.4 version of Cassandra | 2.3.2 for 3.11.13 version of Cassandra
\ No newline at end of file
diff --git a/config/samples/onpremises/clusters_v1beta1_cadence.yaml b/config/samples/onpremises/clusters_v1beta1_cadence.yaml
new file mode 100644
index 000000000..72b302978
--- /dev/null
+++ b/config/samples/onpremises/clusters_v1beta1_cadence.yaml
@@ -0,0 +1,63 @@
+#apiVersion: v1
+#kind: Secret
+#metadata:
+# name: inst-test-aws-cred-secret
+#data:
+# awsAccessKeyId: access_key
+# awsSecretAccessKey: secret_key
+#---
+apiVersion: clusters.instaclustr.com/v1beta1
+kind: Cadence
+metadata:
+ name: cadence-sample
+spec:
+ name: "username-test"
+ version: "1.0.0"
+ standardProvisioning:
+ - targetCassandra:
+ dependencyCdcId: "9d43ac54-7317-4ce5-859a-e9d0443508a4"
+ dependencyVpcType: "VPC_PEERED"
+# packagedProvisioning:
+# - bundledCassandraSpec:
+# nodeSize: "CAS-DEV-t4g.small-5"
+# network: "10.2.0.0/16"
+# replicationFactor: 3
+# nodesNumber: 3
+# privateIPBroadcastForDiscovery: false
+# passwordAndUserAuth: true
+# useAdvancedVisibility: true
+# bundledKafkaSpec:
+# nodeSize: "KFK-DEV-t4g.small-5"
+# nodesNumber: 3
+# network: "10.3.0.0/16"
+# replicationFactor: 3
+# partitionsNumber: 3
+# bundledOpenSearchSpec:
+# nodeSize: "SRH-DEV-t4g.small-5"
+# replicationFactor: 3
+# network: "10.4.0.0/16"
+ # twoFactorDelete:
+ # - email: "rostyslp@netapp.com"
+ privateNetworkCluster: false
+ dataCentres:
+ - region: "US_EAST_2"
+ network: "10.12.0.0/16"
+ # if you use multi-region mode please provide
+ # non-overlapping CIDR block for the secondary mode cluster
+# network: "10.16.0.0/16"
+ cloudProvider: "AWS_VPC"
+ name: "testdc"
+# nodeSize: "CAD-PRD-m5ad.large-75"
+ nodeSize: "CAD-DEV-t3.small-5"
+ nodesNumber: 2
+ clientEncryption: false
+# privateLink:
+# - advertisedHostname: "cadence-sample-test.com"
+ slaTier: "NON_PRODUCTION"
+ useCadenceWebAuth: false
+# targetPrimaryCadence:
+# - dependencyCdcId: "cce79be3-7f41-4cad-837c-86d3d8b4be77"
+# dependencyVpcType: "SEPARATE_VPC"
+ resizeSettings:
+ - notifySupportContacts: false
+ concurrency: 1
\ No newline at end of file
diff --git a/config/samples/onpremises/clusters_v1beta1_cassandra.yaml b/config/samples/onpremises/clusters_v1beta1_cassandra.yaml
new file mode 100644
index 000000000..8b7968872
--- /dev/null
+++ b/config/samples/onpremises/clusters_v1beta1_cassandra.yaml
@@ -0,0 +1,37 @@
+apiVersion: clusters.instaclustr.com/v1beta1
+kind: Cassandra
+metadata:
+ name: cassandra-on-prem-cluster
+spec:
+ name: "danylo-on-prem-cassandra"
+ version: "4.0.10"
+ privateNetworkCluster: false
+ onPremisesSpec:
+ storageClassName: managed-csi-premium
+ osDiskSize: 20Gi
+ dataDiskSize: 200Gi
+ sshGatewayCPU: 2
+ sshGatewayMemory: 4096Mi
+ nodeCPU: 2
+ nodeMemory: 8192Mi
+ osImageURL: "https://s3.amazonaws.com/debian-bucket/debian-11-generic-amd64-20230601-1398.raw"
+ cloudInitScriptRef:
+ namespace: default
+ name: instaclustr-cloud-init-secret
+ dataCentres:
+ - name: "onPremCassandra"
+ region: "CLIENT_DC"
+ cloudProvider: "ONPREMISES"
+ continuousBackup: false
+ nodesNumber: 2
+ replicationFactor: 2
+ privateIpBroadcastForDiscovery: false
+ network: "192.168.0.0/16"
+ tags:
+ "onprem": "test"
+ clientToClusterEncryption: false
+ nodeSize: "CAS-PRD-OP.4.8-200"
+ pciCompliance: false
+ luceneEnabled: false # can be enabled only on 3.11.13 version of Cassandra
+ passwordAndUserAuth: false
+ slaTier: "NON_PRODUCTION"
diff --git a/config/samples/onpremises/clusters_v1beta1_kafka.yaml b/config/samples/onpremises/clusters_v1beta1_kafka.yaml
new file mode 100644
index 000000000..79f5574b6
--- /dev/null
+++ b/config/samples/onpremises/clusters_v1beta1_kafka.yaml
@@ -0,0 +1,40 @@
+apiVersion: clusters.instaclustr.com/v1beta1
+kind: Kafka
+metadata:
+ name: danylo-kafka
+spec:
+ name: "danylo-kafka"
+ version: "3.3.1"
+ pciCompliance: false
+ replicationFactor: 3
+ partitionsNumber: 3
+ allowDeleteTopics: true
+ autoCreateTopics: true
+ clientToClusterEncryption: false
+ privateNetworkCluster: false
+ slaTier: "NON_PRODUCTION"
+ onPremisesSpec:
+ storageClassName: managed-csi-premium
+ osDiskSize: 20Gi
+ dataDiskSize: 200Gi
+ sshGatewayCPU: 2
+ sshGatewayMemory: 4096Mi
+ nodeCPU: 2
+ nodeMemory: 8192Mi
+ osImageURL: "https://s3.amazonaws.com/debian-bucket/debian-11-generic-amd64-20230601-1398.raw"
+ cloudInitScriptRef:
+ namespace: default
+ name: instaclustr-cloud-init-secret
+ dataCentres:
+ - name: "onPremKafka"
+ nodesNumber: 3
+ cloudProvider: "ONPREMISES"
+ tags:
+ tag: "oneTag"
+ tag2: "twoTags"
+ nodeSize: "KFK-DEV-OP.4.8-200"
+ network: "10.0.0.0/16"
+ region: "CLIENT_DC"
+ resizeSettings:
+ - notifySupportContacts: false
+ concurrency: 1
\ No newline at end of file
diff --git a/config/samples/onpremises/clusters_v1beta1_kafkaconnect.yaml b/config/samples/onpremises/clusters_v1beta1_kafkaconnect.yaml
new file mode 100644
index 000000000..0b6a840a0
--- /dev/null
+++ b/config/samples/onpremises/clusters_v1beta1_kafkaconnect.yaml
@@ -0,0 +1,25 @@
+apiVersion: clusters.instaclustr.com/v1beta1
+kind: KafkaConnect
+metadata:
+ name: kafkaconnect-sample
+spec:
+ dataCentres:
+ - name: "US_EAST_1_DC_KAFKA"
+ nodesNumber: 3
+# nodesNumber: 6
+ cloudProvider: "AWS_VPC"
+ replicationFactor: 3
+ tags:
+ tag: "oneTag"
+ tag2: "twoTags"
+ nodeSize: "KCN-DEV-t4g.medium-30"
+ network: "10.15.0.0/16"
+ region: "US_EAST_1"
+ name: "Username-KC"
+ version: "3.1.2"
+ privateNetworkCluster: false
+ slaTier: "NON_PRODUCTION"
+ targetCluster:
+ - managedCluster:
+ - targetKafkaClusterId: "34dfc53c-c8c1-4be8-bd2f-cfdb77ec7349"
+ kafkaConnectVpcType: "KAFKA_VPC"
diff --git a/config/samples/onpremises/clusters_v1beta1_postgresql.yaml b/config/samples/onpremises/clusters_v1beta1_postgresql.yaml
new file mode 100644
index 000000000..11c3b5859
--- /dev/null
+++ b/config/samples/onpremises/clusters_v1beta1_postgresql.yaml
@@ -0,0 +1,52 @@
+apiVersion: clusters.instaclustr.com/v1beta1
+kind: PostgreSQL
+metadata:
+ name: postgresql-sample
+# TODO https://github.com/instaclustr/operator/issues/472
+# annotations:
+# testAnnotation: test
+spec:
+ name: "username-test"
+ version: "15.4.0"
+ dataCentres:
+ - region: "US_WEST_2"
+ network: "10.1.0.0/16"
+ cloudProvider: "AWS_VPC"
+# nodeSize: "PGS-DEV-t4g.medium-30"
+ nodeSize: "PGS-DEV-t4g.small-5"
+ nodesNumber: 2
+ clientEncryption: false
+ name: "testDC1"
+ intraDataCentreReplication:
+ - replicationMode: "SYNCHRONOUS"
+ interDataCentreReplication:
+ - isPrimaryDataCentre: true
+ # - region: "US_WEST_2"
+ # network: "10.2.0.0/16"
+ # cloudProvider: "AWS_VPC"
+ ## nodeSize: "PGS-DEV-t4g.medium-30"
+ # nodeSize: "PGS-DEV-t4g.small-5"
+ # racksNumber: 2
+ # nodesNumber: 1
+ # postgresqlNodeCount: 2
+ # clientEncryption: false
+ # name: "testDC2"
+ # intraDataCentreReplication:
+ # - replicationMode: "ASYNCHRONOUS"
+ # interDataCentreReplication:
+ # - isPrimaryDataCentre: false
+ # clusterConfigurations:
+ # idle_in_transaction_session_timeout: "2"
+ # statement_timeout: "1"
+ # twoFactorDelete:
+ # - email: "rostyslp@netapp.com"
+ # description: "test 222"
+ slaTier: "NON_PRODUCTION"
+# userRefs:
+# - namespace: default
+# name: postgresqluser-sample
+ privateNetworkCluster: false
+ synchronousModeStrict: false
+# resizeSettings:
+# - notifySupportContacts: false
+# concurrency: 1
\ No newline at end of file
diff --git a/config/samples/onpremises/clusters_v1beta1_redis.yaml b/config/samples/onpremises/clusters_v1beta1_redis.yaml
new file mode 100644
index 000000000..d7f28edd2
--- /dev/null
+++ b/config/samples/onpremises/clusters_v1beta1_redis.yaml
@@ -0,0 +1,32 @@
+apiVersion: clusters.instaclustr.com/v1beta1
+kind: Redis
+metadata:
+ name: danylo-redis
+spec:
+ name: "danylo-redis"
+ version: "7.0.12"
+ onPremisesSpec:
+ storageClassName: managed-csi-premium
+ osDiskSize: 20Gi
+ dataDiskSize: 200Gi
+ sshGatewayCPU: 2
+ sshGatewayMemory: 4096Mi
+ nodeCPU: 2
+ nodeMemory: 8192Mi
+ osImageURL: "https://s3.amazonaws.com/debian-bucket/debian-11-generic-amd64-20230601-1398.raw"
+ cloudInitScriptRef:
+ namespace: default
+ name: instaclustr-cloud-init-secret
+ slaTier: "NON_PRODUCTION"
+ clientEncryption: false
+ passwordAndUserAuth: true
+ privateNetworkCluster: false
+ dataCentres:
+ - region: "CLIENT_DC"
+ name: "onPremRedis"
+ cloudProvider: "ONPREMISES"
+ network: "10.1.0.0/16"
+ nodeSize: "RDS-PRD-OP.8.64-400"
+ masterNodes: 3
+ nodesNumber: 0
+ replicationFactor: 0
\ No newline at end of file
diff --git a/controllers/clusterresources/awsendpointserviceprincipal_controller.go b/controllers/clusterresources/awsendpointserviceprincipal_controller.go
index c28c11b78..85f58b4b9 100644
--- a/controllers/clusterresources/awsendpointserviceprincipal_controller.go
+++ b/controllers/clusterresources/awsendpointserviceprincipal_controller.go
@@ -101,7 +101,7 @@ func (r *AWSEndpointServicePrincipalReconciler) handleCreate(ctx context.Context
err = json.Unmarshal(b, &principal.Status)
if err != nil {
l.Error(err, "failed to parse an AWS endpoint service principal resource response from Instaclustr")
- r.EventRecorder.Eventf(principal, models.Warning, models.ConvertionFailed,
+ r.EventRecorder.Eventf(principal, models.Warning, models.ConversionFailed,
"Failed to parse an AWS endpoint service principal resource response from Instaclustr. Reason: %v", err,
)
diff --git a/controllers/clusterresources/clusterbackup_controller.go b/controllers/clusterresources/clusterbackup_controller.go
index aa25574f9..db42d76d2 100644
--- a/controllers/clusterresources/clusterbackup_controller.go
+++ b/controllers/clusterresources/clusterbackup_controller.go
@@ -172,7 +172,7 @@ func (r *ClusterBackupReconciler) Reconcile(ctx context.Context, req ctrl.Reques
)
r.EventRecorder.Eventf(
- backup, models.Warning, models.ConvertionFailed,
+ backup, models.Warning, models.ConversionFailed,
"Start timestamp annotation convertion to int is failed. Reason: %v",
err,
)
diff --git a/controllers/clusters/cadence_controller.go b/controllers/clusters/cadence_controller.go
index a22f993dd..ce4b61f91 100644
--- a/controllers/clusters/cadence_controller.go
+++ b/controllers/clusters/cadence_controller.go
@@ -35,6 +35,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/predicate"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/instaclustr/operator/apis/clusters/v1beta1"
"github.com/instaclustr/operator/pkg/exposeservice"
@@ -56,8 +57,15 @@ type CadenceReconciler struct {
//+kubebuilder:rbac:groups=clusters.instaclustr.com,resources=cadences,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=clusters.instaclustr.com,resources=cadences/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=clusters.instaclustr.com,resources=cadences/finalizers,verbs=update
-//+kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;patch
//+kubebuilder:rbac:groups="",resources=events,verbs=create
+//+kubebuilder:rbac:groups="",resources=endpoints,verbs=get;list;watch;create;update;patch;delete
+//+kubebuilder:rbac:groups=cdi.kubevirt.io,resources=datavolumes,verbs=get;list;watch;create;update;patch;delete;deletecollection
+//+kubebuilder:rbac:groups=kubevirt.io,resources=virtualmachines,verbs=get;list;watch;create;update;patch;delete;deletecollection
+//+kubebuilder:rbac:groups=kubevirt.io,resources=virtualmachineinstances,verbs=get;list;watch;create;update;patch;delete;deletecollection
+//+kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete;deletecollection
+//+kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete;deletecollection
+//+kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch;create;update;patch;delete
+//+kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch;delete;deletecollection
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
@@ -65,92 +73,92 @@ type CadenceReconciler struct {
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.13.0/pkg/reconcile
func (r *CadenceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
- logger := log.FromContext(ctx)
+ l := log.FromContext(ctx)
- cadenceCluster := &v1beta1.Cadence{}
- err := r.Client.Get(ctx, req.NamespacedName, cadenceCluster)
+ c := &v1beta1.Cadence{}
+ err := r.Client.Get(ctx, req.NamespacedName, c)
if err != nil {
if k8serrors.IsNotFound(err) {
- logger.Info("Cadence resource is not found",
+ l.Info("Cadence resource is not found",
"resource name", req.NamespacedName,
)
return ctrl.Result{}, nil
}
- logger.Error(err, "Unable to fetch Cadence resource",
+ l.Error(err, "Unable to fetch Cadence resource",
"resource name", req.NamespacedName,
)
return ctrl.Result{}, err
}
- switch cadenceCluster.Annotations[models.ResourceStateAnnotation] {
+ switch c.Annotations[models.ResourceStateAnnotation] {
case models.CreatingEvent:
- return r.HandleCreateCluster(ctx, cadenceCluster, logger)
+ return r.handleCreateCluster(ctx, c, l)
case models.UpdatingEvent:
- return r.HandleUpdateCluster(ctx, cadenceCluster, logger)
+ return r.handleUpdateCluster(ctx, c, l)
case models.DeletingEvent:
- return r.HandleDeleteCluster(ctx, cadenceCluster, logger)
+ return r.handleDeleteCluster(ctx, c, l)
case models.GenericEvent:
- logger.Info("Generic event isn't handled",
+ l.Info("Generic event isn't handled",
"request", req,
- "event", cadenceCluster.Annotations[models.ResourceStateAnnotation],
+ "event", c.Annotations[models.ResourceStateAnnotation],
)
return ctrl.Result{}, nil
default:
- logger.Info("Unknown event isn't handled",
+ l.Info("Unknown event isn't handled",
"request", req,
- "event", cadenceCluster.Annotations[models.ResourceStateAnnotation],
+ "event", c.Annotations[models.ResourceStateAnnotation],
)
return ctrl.Result{}, nil
}
}
-func (r *CadenceReconciler) HandleCreateCluster(
+func (r *CadenceReconciler) handleCreateCluster(
ctx context.Context,
- cadence *v1beta1.Cadence,
- logger logr.Logger,
+ c *v1beta1.Cadence,
+ l logr.Logger,
) (ctrl.Result, error) {
- if cadence.Status.ID == "" {
- patch := cadence.NewPatch()
+ if c.Status.ID == "" {
+ patch := c.NewPatch()
- for _, packagedProvisioning := range cadence.Spec.PackagedProvisioning {
- requeueNeeded, err := r.preparePackagedSolution(ctx, cadence, packagedProvisioning)
+ for _, packagedProvisioning := range c.Spec.PackagedProvisioning {
+ requeueNeeded, err := r.preparePackagedSolution(ctx, c, packagedProvisioning)
if err != nil {
- logger.Error(err, "Cannot prepare packaged solution for Cadence cluster",
- "cluster name", cadence.Spec.Name,
+ l.Error(err, "Cannot prepare packaged solution for Cadence cluster",
+ "cluster name", c.Spec.Name,
)
- r.EventRecorder.Eventf(cadence, models.Warning, models.CreationFailed,
+ r.EventRecorder.Eventf(c, models.Warning, models.CreationFailed,
"Cannot prepare packaged solution for Cadence cluster. Reason: %v", err)
return ctrl.Result{}, err
}
if requeueNeeded {
- logger.Info("Waiting for bundled clusters to be created",
- "cadence cluster name", cadence.Spec.Name)
+ l.Info("Waiting for bundled clusters to be created",
+ "c cluster name", c.Spec.Name)
- r.EventRecorder.Event(cadence, models.Normal, "Waiting",
+ r.EventRecorder.Event(c, models.Normal, "Waiting",
"Waiting for bundled clusters to be created")
return models.ReconcileRequeue, nil
}
}
- logger.Info(
+ l.Info(
"Creating Cadence cluster",
- "cluster name", cadence.Spec.Name,
- "data centres", cadence.Spec.DataCentres,
+ "cluster name", c.Spec.Name,
+ "data centres", c.Spec.DataCentres,
)
- cadenceAPISpec, err := cadence.Spec.ToInstAPI(ctx, r.Client)
+ cadenceAPISpec, err := c.Spec.ToInstAPI(ctx, r.Client)
if err != nil {
- logger.Error(err, "Cannot convert Cadence cluster manifest to API spec",
- "cluster manifest", cadence.Spec)
+ l.Error(err, "Cannot convert Cadence cluster manifest to API spec",
+ "cluster manifest", c.Spec)
- r.EventRecorder.Eventf(cadence, models.Warning, models.ConvertionFailed,
+ r.EventRecorder.Eventf(c, models.Warning, models.ConversionFailed,
"Cluster convertion from the Instaclustr API to k8s resource is failed. Reason: %v", err)
return ctrl.Result{}, err
@@ -158,141 +166,218 @@ func (r *CadenceReconciler) HandleCreateCluster(
id, err := r.API.CreateCluster(instaclustr.CadenceEndpoint, cadenceAPISpec)
if err != nil {
- logger.Error(
+ l.Error(
err, "Cannot create Cadence cluster",
- "cadence manifest", cadence.Spec,
+ "c manifest", c.Spec,
)
- r.EventRecorder.Eventf(cadence, models.Warning, models.CreationFailed,
+ r.EventRecorder.Eventf(c, models.Warning, models.CreationFailed,
"Cluster creation on the Instaclustr is failed. Reason: %v", err)
return ctrl.Result{}, err
}
- cadence.Status.ID = id
- err = r.Status().Patch(ctx, cadence, patch)
+ c.Status.ID = id
+ err = r.Status().Patch(ctx, c, patch)
if err != nil {
- logger.Error(err, "Cannot update Cadence cluster status",
- "cluster name", cadence.Spec.Name,
- "cluster status", cadence.Status,
+ l.Error(err, "Cannot update Cadence cluster status",
+ "cluster name", c.Spec.Name,
+ "cluster status", c.Status,
)
- r.EventRecorder.Eventf(cadence, models.Warning, models.PatchFailed,
+ r.EventRecorder.Eventf(c, models.Warning, models.PatchFailed,
"Cluster resource status patch is failed. Reason: %v", err)
return ctrl.Result{}, err
}
- if cadence.Spec.Description != "" {
- err = r.API.UpdateDescriptionAndTwoFactorDelete(instaclustr.ClustersEndpointV1, id, cadence.Spec.Description, nil)
+ if c.Spec.Description != "" {
+ err = r.API.UpdateDescriptionAndTwoFactorDelete(instaclustr.ClustersEndpointV1, id, c.Spec.Description, nil)
if err != nil {
- logger.Error(err, "Cannot update Cadence cluster description and TwoFactorDelete",
- "cluster name", cadence.Spec.Name,
- "description", cadence.Spec.Description,
- "twoFactorDelete", cadence.Spec.TwoFactorDelete,
+ l.Error(err, "Cannot update Cadence cluster description and TwoFactorDelete",
+ "cluster name", c.Spec.Name,
+ "description", c.Spec.Description,
+ "twoFactorDelete", c.Spec.TwoFactorDelete,
)
- r.EventRecorder.Eventf(cadence, models.Warning, models.CreationFailed,
+ r.EventRecorder.Eventf(c, models.Warning, models.CreationFailed,
"Cluster description and TwoFactoDelete update is failed. Reason: %v", err)
}
}
- cadence.Annotations[models.ResourceStateAnnotation] = models.CreatedEvent
- controllerutil.AddFinalizer(cadence, models.DeletionFinalizer)
+ c.Annotations[models.ResourceStateAnnotation] = models.CreatedEvent
+ controllerutil.AddFinalizer(c, models.DeletionFinalizer)
- err = r.Patch(ctx, cadence, patch)
+ err = r.Patch(ctx, c, patch)
if err != nil {
- logger.Error(err, "Cannot patch Cadence cluster",
- "cluster name", cadence.Spec.Name, "patch", patch)
+ l.Error(err, "Cannot patch Cadence cluster",
+ "cluster name", c.Spec.Name, "patch", patch)
- r.EventRecorder.Eventf(cadence, models.Warning, models.PatchFailed,
+ r.EventRecorder.Eventf(c, models.Warning, models.PatchFailed,
"Cluster resource status patch is failed. Reason: %v", err)
return ctrl.Result{}, err
}
- logger.Info(
+ l.Info(
"Cadence resource has been created",
- "cluster name", cadence.Name,
- "cluster ID", cadence.Status.ID,
- "kind", cadence.Kind,
- "api version", cadence.APIVersion,
- "namespace", cadence.Namespace,
+ "cluster name", c.Name,
+ "cluster ID", c.Status.ID,
+ "kind", c.Kind,
+ "api version", c.APIVersion,
+ "namespace", c.Namespace,
)
- r.EventRecorder.Eventf(cadence, models.Normal, models.Created,
+ r.EventRecorder.Eventf(c, models.Normal, models.Created,
"Cluster creation request is sent. Cluster ID: %s", id)
}
- if cadence.Status.State != models.DeletedStatus {
- err := r.startClusterStatusJob(cadence)
+ if c.Status.State != models.DeletedStatus {
+ err := r.startClusterStatusJob(c)
if err != nil {
- logger.Error(err, "Cannot start cluster status job",
- "cadence cluster ID", cadence.Status.ID,
+ l.Error(err, "Cannot start cluster status job",
+ "c cluster ID", c.Status.ID,
)
- r.EventRecorder.Eventf(cadence, models.Warning, models.CreationFailed,
+ r.EventRecorder.Eventf(c, models.Warning, models.CreationFailed,
"Cluster status check job is failed. Reason: %v", err)
return ctrl.Result{}, err
}
- r.EventRecorder.Event(cadence, models.Normal, models.Created,
+ r.EventRecorder.Event(c, models.Normal, models.Created,
"Cluster status check job is started")
}
+ if c.Spec.OnPremisesSpec != nil {
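+ // On-premises clusters: re-fetch the newly created cluster, convert it to the
+ // k8s resource shape, provision the on-premises node resources from it, and
+ // start the IPs-check job instead of continuing the cloud-only flow.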
+ iData, err := r.API.GetCadence(c.Status.ID)
+ if err != nil {
+ l.Error(err, "Cannot get cluster from the Instaclustr API",
+ "cluster name", c.Spec.Name,
+ "data centres", c.Spec.DataCentres,
+ "cluster ID", c.Status.ID,
+ )
+ r.EventRecorder.Eventf(
+ c, models.Warning, models.FetchFailed,
+ "Cluster fetch from the Instaclustr API is failed. Reason: %v",
+ err,
+ )
+ return reconcile.Result{}, err
+ }
+ iCadence, err := c.FromInstAPI(iData)
+ if err != nil {
+ l.Error(
+ err, "Cannot convert cluster from the Instaclustr API",
+ "cluster name", c.Spec.Name,
+ "cluster ID", c.Status.ID,
+ )
+ r.EventRecorder.Eventf(
+ c, models.Warning, models.ConversionFailed,
+ "Cluster convertion from the Instaclustr API to k8s resource is failed. Reason: %v",
+ err,
+ )
+ return reconcile.Result{}, err
+ }
+
+ bootstrap := newOnPremisesBootstrap(
+ r.Client,
+ c,
+ r.EventRecorder,
+ iCadence.Status.ClusterStatus,
+ c.Spec.OnPremisesSpec,
+ newExposePorts(c.GetExposePorts()),
+ c.GetHeadlessPorts(),
+ c.Spec.PrivateNetworkCluster,
+ )
+
+ err = handleCreateOnPremisesClusterResources(ctx, bootstrap)
+ if err != nil {
+ l.Error(
+ err, "Cannot create resources for on-premises cluster",
+ "cluster spec", c.Spec.OnPremisesSpec,
+ )
+ r.EventRecorder.Eventf(
+ c, models.Warning, models.CreationFailed,
+ "Resources creation for on-premises cluster is failed. Reason: %v",
+ err,
+ )
+ return reconcile.Result{}, err
+ }
+
+ err = r.startClusterOnPremisesIPsJob(c, bootstrap)
+ if err != nil {
+ l.Error(err, "Cannot start on-premises cluster IPs check job",
+ "cluster ID", c.Status.ID,
+ )
+
+ r.EventRecorder.Eventf(
+ c, models.Warning, models.CreationFailed,
+ "On-premises cluster IPs check job is failed. Reason: %v",
+ err,
+ )
+ return reconcile.Result{}, err
+ }
+
+ l.Info(
+ "On-premises resources have been created",
+ "cluster name", c.Spec.Name,
+ "on-premises Spec", c.Spec.OnPremisesSpec,
+ "cluster ID", c.Status.ID,
+ )
+ return models.ExitReconcile, nil
+ }
return ctrl.Result{}, nil
}
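
Review note, outside the patch itself: the on-premises creation sequence added above (fetch the created cluster, convert it, build the bootstrap, create the node resources, start the IPs-check job) is repeated almost line for line in the Cassandra controller later in this diff. A minimal sketch of how the tail of that sequence could be shared, assuming the helpers introduced by this change (onPremisesBootstrap, handleCreateOnPremisesClusterResources) keep their current shapes; the function below is hypothetical and not part of the patch:

package clusters

import (
	"context"
	"fmt"
)

// runOnPremisesCreateFlow is an illustrative sketch only; onPremisesBootstrap and
// handleCreateOnPremisesClusterResources are the shared on-premises helpers this
// diff relies on (defined outside the hunks shown here).
func runOnPremisesCreateFlow(ctx context.Context, b *onPremisesBootstrap, startIPsJob func() error) error {
	// Create the Kubernetes resources for the on-premises nodes.
	err := handleCreateOnPremisesClusterResources(ctx, b)
	if err != nil {
		return fmt.Errorf("cannot create on-premises resources: %w", err)
	}

	// startIPsJob wraps the controller-specific scheduling call, e.g.
	// func() error { return r.startClusterOnPremisesIPsJob(c, b) }.
	err = startIPsJob()
	if err != nil {
		return fmt.Errorf("cannot start on-premises IPs check job: %w", err)
	}

	return nil
}
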
-func (r *CadenceReconciler) HandleUpdateCluster(
+func (r *CadenceReconciler) handleUpdateCluster(
ctx context.Context,
- cadence *v1beta1.Cadence,
- logger logr.Logger,
+ c *v1beta1.Cadence,
+ l logr.Logger,
) (ctrl.Result, error) {
- iData, err := r.API.GetCadence(cadence.Status.ID)
+ iData, err := r.API.GetCadence(c.Status.ID)
if err != nil {
- logger.Error(
+ l.Error(
err, "Cannot get Cadence cluster from the Instaclustr API",
- "cluster name", cadence.Spec.Name,
- "cluster ID", cadence.Status.ID,
+ "cluster name", c.Spec.Name,
+ "cluster ID", c.Status.ID,
)
- r.EventRecorder.Eventf(cadence, models.Warning, models.FetchFailed,
+ r.EventRecorder.Eventf(c, models.Warning, models.FetchFailed,
"Cluster fetch from the Instaclustr API is failed. Reason: %v", err)
return ctrl.Result{}, err
}
- iCadence, err := cadence.FromInstAPI(iData)
+ iCadence, err := c.FromInstAPI(iData)
if err != nil {
- logger.Error(
+ l.Error(
err, "Cannot convert Cadence cluster from the Instaclustr API",
- "cluster name", cadence.Spec.Name,
- "cluster ID", cadence.Status.ID,
+ "cluster name", c.Spec.Name,
+ "cluster ID", c.Status.ID,
)
- r.EventRecorder.Eventf(cadence, models.Warning, models.ConvertionFailed,
+ r.EventRecorder.Eventf(c, models.Warning, models.ConversionFailed,
"Cluster convertion from the Instaclustr API to k8s resource is failed. Reason: %v", err)
return ctrl.Result{}, err
}
if iCadence.Status.CurrentClusterOperationStatus != models.NoOperation {
- logger.Info("Cadence cluster is not ready to update",
+ l.Info("Cadence cluster is not ready to update",
"cluster name", iCadence.Spec.Name,
"cluster state", iCadence.Status.State,
"current operation status", iCadence.Status.CurrentClusterOperationStatus,
)
- patch := cadence.NewPatch()
- cadence.Annotations[models.ResourceStateAnnotation] = models.UpdatingEvent
- cadence.Annotations[models.UpdateQueuedAnnotation] = models.True
- err = r.Patch(ctx, cadence, patch)
+ patch := c.NewPatch()
+ c.Annotations[models.ResourceStateAnnotation] = models.UpdatingEvent
+ c.Annotations[models.UpdateQueuedAnnotation] = models.True
+ err = r.Patch(ctx, c, patch)
if err != nil {
- logger.Error(err, "Cannot patch Cadence cluster",
- "cluster name", cadence.Spec.Name,
+ l.Error(err, "Cannot patch Cadence cluster",
+ "cluster name", c.Spec.Name,
"patch", patch)
- r.EventRecorder.Eventf(cadence, models.Warning, models.PatchFailed,
+ r.EventRecorder.Eventf(c, models.Warning, models.PatchFailed,
"Cluster resource patch is failed. Reason: %v", err)
return ctrl.Result{}, err
@@ -301,236 +386,271 @@ func (r *CadenceReconciler) HandleUpdateCluster(
return models.ReconcileRequeue, nil
}
- if cadence.Annotations[models.ExternalChangesAnnotation] == models.True {
- return r.handleExternalChanges(cadence, iCadence, logger)
+ if c.Annotations[models.ExternalChangesAnnotation] == models.True {
+ return r.handleExternalChanges(c, iCadence, l)
}
- if cadence.Spec.ClusterSettingsNeedUpdate(iCadence.Spec.Cluster) {
- logger.Info("Updating cluster settings",
+ if c.Spec.ClusterSettingsNeedUpdate(iCadence.Spec.Cluster) {
+ l.Info("Updating cluster settings",
"instaclustr description", iCadence.Spec.Description,
"instaclustr two factor delete", iCadence.Spec.TwoFactorDelete)
- err = r.API.UpdateClusterSettings(cadence.Status.ID, cadence.Spec.ClusterSettingsUpdateToInstAPI())
+ err = r.API.UpdateClusterSettings(c.Status.ID, c.Spec.ClusterSettingsUpdateToInstAPI())
if err != nil {
- logger.Error(err, "Cannot update cluster settings",
- "cluster ID", cadence.Status.ID, "cluster spec", cadence.Spec)
- r.EventRecorder.Eventf(cadence, models.Warning, models.UpdateFailed,
+ l.Error(err, "Cannot update cluster settings",
+ "cluster ID", c.Status.ID, "cluster spec", c.Spec)
+ r.EventRecorder.Eventf(c, models.Warning, models.UpdateFailed,
"Cannot update cluster settings. Reason: %v", err)
return ctrl.Result{}, err
}
}
- logger.Info("Update request to Instaclustr API has been sent",
- "spec data centres", cadence.Spec.DataCentres,
- "resize settings", cadence.Spec.ResizeSettings,
+ l.Info("Update request to Instaclustr API has been sent",
+ "spec data centres", c.Spec.DataCentres,
+ "resize settings", c.Spec.ResizeSettings,
)
- err = r.API.UpdateCluster(cadence.Status.ID, instaclustr.CadenceEndpoint, cadence.Spec.NewDCsUpdate())
+ err = r.API.UpdateCluster(c.Status.ID, instaclustr.CadenceEndpoint, c.Spec.NewDCsUpdate())
if err != nil {
- logger.Error(err, "Cannot update Cadence cluster",
- "cluster ID", cadence.Status.ID,
- "update request", cadence.Spec.NewDCsUpdate(),
+ l.Error(err, "Cannot update Cadence cluster",
+ "cluster ID", c.Status.ID,
+ "update request", c.Spec.NewDCsUpdate(),
)
- r.EventRecorder.Eventf(cadence, models.Warning, models.UpdateFailed,
+ r.EventRecorder.Eventf(c, models.Warning, models.UpdateFailed,
"Cluster update on the Instaclustr API is failed. Reason: %v", err)
return ctrl.Result{}, err
}
- patch := cadence.NewPatch()
- cadence.Annotations[models.ResourceStateAnnotation] = models.UpdatedEvent
- cadence.Annotations[models.UpdateQueuedAnnotation] = ""
- err = r.Patch(ctx, cadence, patch)
+ patch := c.NewPatch()
+ c.Annotations[models.ResourceStateAnnotation] = models.UpdatedEvent
+ c.Annotations[models.UpdateQueuedAnnotation] = ""
+ err = r.Patch(ctx, c, patch)
if err != nil {
- logger.Error(err, "Cannot patch Cadence cluster",
- "cluster name", cadence.Spec.Name,
+ l.Error(err, "Cannot patch Cadence cluster",
+ "cluster name", c.Spec.Name,
"patch", patch)
- r.EventRecorder.Eventf(cadence, models.Warning, models.PatchFailed,
+ r.EventRecorder.Eventf(c, models.Warning, models.PatchFailed,
"Cluster resource patch is failed. Reason: %v", err)
return ctrl.Result{}, err
}
- logger.Info(
+ l.Info(
"Cluster has been updated",
- "cluster name", cadence.Spec.Name,
- "cluster ID", cadence.Status.ID,
- "data centres", cadence.Spec.DataCentres,
+ "cluster name", c.Spec.Name,
+ "cluster ID", c.Status.ID,
+ "data centres", c.Spec.DataCentres,
)
return ctrl.Result{}, nil
}
-func (r *CadenceReconciler) handleExternalChanges(cadence, iCadence *v1beta1.Cadence, l logr.Logger) (ctrl.Result, error) {
- if !cadence.Spec.AreDCsEqual(iCadence.Spec.DataCentres) {
+func (r *CadenceReconciler) handleExternalChanges(c, iCadence *v1beta1.Cadence, l logr.Logger) (ctrl.Result, error) {
+ if !c.Spec.AreDCsEqual(iCadence.Spec.DataCentres) {
l.Info(msgExternalChanges,
"instaclustr data", iCadence.Spec.DataCentres,
- "k8s resource spec", cadence.Spec.DataCentres)
+ "k8s resource spec", c.Spec.DataCentres)
- msgDiffSpecs, err := createSpecDifferenceMessage(cadence.Spec.DataCentres, iCadence.Spec.DataCentres)
+ msgDiffSpecs, err := createSpecDifferenceMessage(c.Spec.DataCentres, iCadence.Spec.DataCentres)
if err != nil {
l.Error(err, "Cannot create specification difference message",
- "instaclustr data", iCadence.Spec, "k8s resource spec", cadence.Spec)
+ "instaclustr data", iCadence.Spec, "k8s resource spec", c.Spec)
return ctrl.Result{}, err
}
- r.EventRecorder.Eventf(cadence, models.Warning, models.ExternalChanges, msgDiffSpecs)
+ r.EventRecorder.Eventf(c, models.Warning, models.ExternalChanges, msgDiffSpecs)
return ctrl.Result{}, nil
}
- patch := cadence.NewPatch()
+ patch := c.NewPatch()
- cadence.Annotations[models.ExternalChangesAnnotation] = ""
+ c.Annotations[models.ExternalChangesAnnotation] = ""
- err := r.Patch(context.Background(), cadence, patch)
+ err := r.Patch(context.Background(), c, patch)
if err != nil {
l.Error(err, "Cannot patch cluster resource",
- "cluster name", cadence.Spec.Name, "cluster ID", cadence.Status.ID)
+ "cluster name", c.Spec.Name, "cluster ID", c.Status.ID)
- r.EventRecorder.Eventf(cadence, models.Warning, models.PatchFailed,
+ r.EventRecorder.Eventf(c, models.Warning, models.PatchFailed,
"Cluster resource patch is failed. Reason: %v", err)
return ctrl.Result{}, err
}
- l.Info("External changes have been reconciled", "resource ID", cadence.Status.ID)
- r.EventRecorder.Event(cadence, models.Normal, models.ExternalChanges, "External changes have been reconciled")
+ l.Info("External changes have been reconciled", "resource ID", c.Status.ID)
+ r.EventRecorder.Event(c, models.Normal, models.ExternalChanges, "External changes have been reconciled")
return ctrl.Result{}, nil
}
-func (r *CadenceReconciler) HandleDeleteCluster(
+func (r *CadenceReconciler) handleDeleteCluster(
ctx context.Context,
- cadence *v1beta1.Cadence,
- logger logr.Logger,
+ c *v1beta1.Cadence,
+ l logr.Logger,
) (ctrl.Result, error) {
- _, err := r.API.GetCadence(cadence.Status.ID)
+ _, err := r.API.GetCadence(c.Status.ID)
if err != nil && !errors.Is(err, instaclustr.NotFound) {
- logger.Error(
+ l.Error(
err, "Cannot get Cadence cluster status from the Instaclustr API",
- "cluster name", cadence.Spec.Name,
- "cluster ID", cadence.Status.ID,
+ "cluster name", c.Spec.Name,
+ "cluster ID", c.Status.ID,
)
- r.EventRecorder.Eventf(cadence, models.Warning, models.FetchFailed,
+ r.EventRecorder.Eventf(c, models.Warning, models.FetchFailed,
"Cluster resource fetch from the Instaclustr API is failed. Reason: %v", err)
return ctrl.Result{}, err
}
if !errors.Is(err, instaclustr.NotFound) {
- logger.Info("Sending cluster deletion to the Instaclustr API",
- "cluster name", cadence.Spec.Name,
- "cluster ID", cadence.Status.ID)
+ l.Info("Sending cluster deletion to the Instaclustr API",
+ "cluster name", c.Spec.Name,
+ "cluster ID", c.Status.ID)
- err = r.API.DeleteCluster(cadence.Status.ID, instaclustr.CadenceEndpoint)
+ err = r.API.DeleteCluster(c.Status.ID, instaclustr.CadenceEndpoint)
if err != nil {
- logger.Error(err, "Cannot delete Cadence cluster",
- "cluster name", cadence.Spec.Name,
- "cluster status", cadence.Status,
+ l.Error(err, "Cannot delete Cadence cluster",
+ "cluster name", c.Spec.Name,
+ "cluster status", c.Status,
)
- r.EventRecorder.Eventf(cadence, models.Warning, models.DeletionFailed,
+ r.EventRecorder.Eventf(c, models.Warning, models.DeletionFailed,
"Cluster deletion is failed on the Instaclustr. Reason: %v", err)
return ctrl.Result{}, err
}
- r.EventRecorder.Event(cadence, models.Normal, models.DeletionStarted,
+ r.EventRecorder.Event(c, models.Normal, models.DeletionStarted,
"Cluster deletion request is sent to the Instaclustr API.")
- if cadence.Spec.TwoFactorDelete != nil {
- patch := cadence.NewPatch()
+ if c.Spec.TwoFactorDelete != nil {
+ patch := c.NewPatch()
- cadence.Annotations[models.ResourceStateAnnotation] = models.UpdatedEvent
- cadence.Annotations[models.ClusterDeletionAnnotation] = models.Triggered
- err = r.Patch(ctx, cadence, patch)
+ c.Annotations[models.ResourceStateAnnotation] = models.UpdatedEvent
+ c.Annotations[models.ClusterDeletionAnnotation] = models.Triggered
+ err = r.Patch(ctx, c, patch)
if err != nil {
- logger.Error(err, "Cannot patch cluster resource",
- "cluster name", cadence.Spec.Name,
- "cluster state", cadence.Status.State)
- r.EventRecorder.Eventf(cadence, models.Warning, models.PatchFailed,
+ l.Error(err, "Cannot patch cluster resource",
+ "cluster name", c.Spec.Name,
+ "cluster state", c.Status.State)
+ r.EventRecorder.Eventf(c, models.Warning, models.PatchFailed,
"Cluster resource patch is failed. Reason: %v",
err)
return ctrl.Result{}, err
}
- logger.Info(msgDeleteClusterWithTwoFactorDelete, "cluster ID", cadence.Status.ID)
+ l.Info(msgDeleteClusterWithTwoFactorDelete, "cluster ID", c.Status.ID)
- r.EventRecorder.Event(cadence, models.Normal, models.DeletionStarted,
+ r.EventRecorder.Event(c, models.Normal, models.DeletionStarted,
"Two-Factor Delete is enabled, please confirm cluster deletion via email or phone.")
-
return ctrl.Result{}, nil
}
+ if c.Spec.OnPremisesSpec != nil {
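+ // For on-premises clusters, delete the Kubernetes resources provisioned for the
+ // nodes and remove the finalizer right away; the remaining cloud clean-up below
+ // is skipped for this path.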
+ err = deleteOnPremResources(ctx, r.Client, c.Status.ID, c.Namespace)
+ if err != nil {
+ l.Error(err, "Cannot delete cluster on-premises resources",
+ "cluster ID", c.Status.ID)
+ r.EventRecorder.Eventf(c, models.Warning, models.DeletionFailed,
+ "Cluster on-premises resources deletion is failed. Reason: %v", err)
+ return reconcile.Result{}, err
+ }
+
+ l.Info("Cluster on-premises resources are deleted",
+ "cluster ID", c.Status.ID)
+ r.EventRecorder.Eventf(c, models.Normal, models.Deleted,
+ "Cluster on-premises resources are deleted")
+
+ patch := c.NewPatch()
+ controllerutil.RemoveFinalizer(c, models.DeletionFinalizer)
+
+ err = r.Patch(ctx, c, patch)
+ if err != nil {
+ l.Error(err, "Cannot patch cluster resource",
+ "cluster name", c.Spec.Name,
+ "cluster ID", c.Status.ID,
+ "kind", c.Kind,
+ "api Version", c.APIVersion,
+ "namespace", c.Namespace,
+ "cluster metadata", c.ObjectMeta,
+ )
+ r.EventRecorder.Eventf(c, models.Warning, models.PatchFailed,
+ "Cluster resource patch is failed. Reason: %v", err)
+ return reconcile.Result{}, err
+ }
+
+ return reconcile.Result{}, nil
+ }
+ r.Scheduler.RemoveJob(c.GetJobID(scheduler.OnPremisesIPsChecker))
}
- logger.Info("Cadence cluster is being deleted",
- "cluster name", cadence.Spec.Name,
- "cluster status", cadence.Status)
+ l.Info("Cadence cluster is being deleted",
+ "cluster name", c.Spec.Name,
+ "cluster status", c.Status)
- for _, packagedProvisioning := range cadence.Spec.PackagedProvisioning {
- err = r.deletePackagedResources(ctx, cadence, packagedProvisioning)
+ for _, packagedProvisioning := range c.Spec.PackagedProvisioning {
+ err = r.deletePackagedResources(ctx, c, packagedProvisioning)
if err != nil {
- logger.Error(
+ l.Error(
err, "Cannot delete Cadence packaged resources",
- "cluster name", cadence.Spec.Name,
- "cluster ID", cadence.Status.ID,
+ "cluster name", c.Spec.Name,
+ "cluster ID", c.Status.ID,
)
- r.EventRecorder.Eventf(cadence, models.Warning, models.DeletionFailed,
+ r.EventRecorder.Eventf(c, models.Warning, models.DeletionFailed,
"Cannot delete Cadence packaged resources. Reason: %v", err)
return ctrl.Result{}, err
}
}
- r.Scheduler.RemoveJob(cadence.GetJobID(scheduler.StatusChecker))
- patch := cadence.NewPatch()
- controllerutil.RemoveFinalizer(cadence, models.DeletionFinalizer)
- cadence.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent
+ r.Scheduler.RemoveJob(c.GetJobID(scheduler.StatusChecker))
+ patch := c.NewPatch()
+ controllerutil.RemoveFinalizer(c, models.DeletionFinalizer)
+ c.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent
- err = r.Patch(ctx, cadence, patch)
+ err = r.Patch(ctx, c, patch)
if err != nil {
- logger.Error(err, "Cannot patch Cadence cluster",
- "cluster name", cadence.Spec.Name,
+ l.Error(err, "Cannot patch Cadence cluster",
+ "cluster name", c.Spec.Name,
"patch", patch,
)
return ctrl.Result{}, err
}
- err = exposeservice.Delete(r.Client, cadence.Name, cadence.Namespace)
+ err = exposeservice.Delete(r.Client, c.Name, c.Namespace)
if err != nil {
- logger.Error(err, "Cannot delete Cadence cluster expose service",
- "cluster ID", cadence.Status.ID,
- "cluster name", cadence.Spec.Name,
+ l.Error(err, "Cannot delete Cadence cluster expose service",
+ "cluster ID", c.Status.ID,
+ "cluster name", c.Spec.Name,
)
return ctrl.Result{}, err
}
- logger.Info("Cadence cluster was deleted",
- "cluster name", cadence.Spec.Name,
- "cluster ID", cadence.Status.ID,
+ l.Info("Cadence cluster was deleted",
+ "cluster name", c.Spec.Name,
+ "cluster ID", c.Status.ID,
)
- r.EventRecorder.Event(cadence, models.Normal, models.Deleted, "Cluster resource is deleted")
+ r.EventRecorder.Event(c, models.Normal, models.Deleted, "Cluster resource is deleted")
return ctrl.Result{}, nil
}
func (r *CadenceReconciler) preparePackagedSolution(
ctx context.Context,
- cluster *v1beta1.Cadence,
+ c *v1beta1.Cadence,
packagedProvisioning *v1beta1.PackagedProvisioning,
) (bool, error) {
- if len(cluster.Spec.DataCentres) < 1 {
+ if len(c.Spec.DataCentres) < 1 {
return false, models.ErrZeroDataCentres
}
- labelsToQuery := fmt.Sprintf("%s=%s", models.ControlledByLabel, cluster.Name)
+ labelsToQuery := fmt.Sprintf("%s=%s", models.ControlledByLabel, c.Name)
selector, err := labels.Parse(labelsToQuery)
if err != nil {
return false, err
@@ -555,7 +675,7 @@ func (r *CadenceReconciler) preparePackagedSolution(
models.CassandraAppKind)
}
- cassandraSpec, err := r.newCassandraSpec(cluster, cassandraVersions[len(cassandraVersions)-1].String())
+ cassandraSpec, err := r.newCassandraSpec(c, cassandraVersions[len(cassandraVersions)-1].String())
if err != nil {
return false, err
}
@@ -593,7 +713,7 @@ func (r *CadenceReconciler) preparePackagedSolution(
models.KafkaAppType)
}
- kafkaSpec, err := r.newKafkaSpec(cluster, kafkaVersions[len(kafkaVersions)-1].String())
+ kafkaSpec, err := r.newKafkaSpec(c, kafkaVersions[len(kafkaVersions)-1].String())
if err != nil {
return false, err
}
@@ -631,7 +751,7 @@ func (r *CadenceReconciler) preparePackagedSolution(
}
// For OpenSearch we cannot use the latest version because it is not supported by Cadence. So we use the oldest one.
- osSpec, err := r.newOpenSearchSpec(cluster, openSearchVersions[0].String())
+ osSpec, err := r.newOpenSearchSpec(c, openSearchVersions[0].String())
if err != nil {
return false, err
}
@@ -657,7 +777,7 @@ func (r *CadenceReconciler) preparePackagedSolution(
return true, nil
}
- cluster.Spec.StandardProvisioning = append(cluster.Spec.StandardProvisioning, &v1beta1.StandardProvisioning{
+ c.Spec.StandardProvisioning = append(c.Spec.StandardProvisioning, &v1beta1.StandardProvisioning{
AdvancedVisibility: advancedVisibilities,
TargetCassandra: &v1beta1.TargetCassandra{
DependencyCDCID: cassandraList.Items[0].Status.DataCentres[0].ID,
@@ -668,42 +788,42 @@ func (r *CadenceReconciler) preparePackagedSolution(
return false, nil
}
-func (r *CadenceReconciler) newCassandraSpec(cadence *v1beta1.Cadence, latestCassandraVersion string) (*v1beta1.Cassandra, error) {
+func (r *CadenceReconciler) newCassandraSpec(c *v1beta1.Cadence, latestCassandraVersion string) (*v1beta1.Cassandra, error) {
typeMeta := v1.TypeMeta{
Kind: models.CassandraKind,
APIVersion: models.ClustersV1beta1APIVersion,
}
metadata := v1.ObjectMeta{
- Name: models.CassandraChildPrefix + cadence.Name,
- Labels: map[string]string{models.ControlledByLabel: cadence.Name},
+ Name: models.CassandraChildPrefix + c.Name,
+ Labels: map[string]string{models.ControlledByLabel: c.Name},
Annotations: map[string]string{models.ResourceStateAnnotation: models.CreatingEvent},
- Namespace: cadence.ObjectMeta.Namespace,
+ Namespace: c.ObjectMeta.Namespace,
Finalizers: []string{},
}
- if len(cadence.Spec.DataCentres) == 0 {
+ if len(c.Spec.DataCentres) == 0 {
return nil, models.ErrZeroDataCentres
}
- slaTier := cadence.Spec.SLATier
- privateClusterNetwork := cadence.Spec.PrivateNetworkCluster
- pciCompliance := cadence.Spec.PCICompliance
+ slaTier := c.Spec.SLATier
+ privateClusterNetwork := c.Spec.PrivateNetworkCluster
+ pciCompliance := c.Spec.PCICompliance
var twoFactorDelete []*v1beta1.TwoFactorDelete
- if len(cadence.Spec.TwoFactorDelete) > 0 {
+ if len(c.Spec.TwoFactorDelete) > 0 {
twoFactorDelete = []*v1beta1.TwoFactorDelete{
{
- Email: cadence.Spec.TwoFactorDelete[0].Email,
- Phone: cadence.Spec.TwoFactorDelete[0].Phone,
+ Email: c.Spec.TwoFactorDelete[0].Email,
+ Phone: c.Spec.TwoFactorDelete[0].Phone,
},
}
}
var cassNodeSize, network string
var cassNodesNumber, cassReplicationFactor int
var cassPrivateIPBroadcastForDiscovery, cassPasswordAndUserAuth bool
- for _, dc := range cadence.Spec.DataCentres {
- for _, pp := range cadence.Spec.PackagedProvisioning {
+ for _, dc := range c.Spec.DataCentres {
+ for _, pp := range c.Spec.PackagedProvisioning {
cassNodeSize = pp.BundledCassandraSpec.NodeSize
network = pp.BundledCassandraSpec.Network
cassNodesNumber = pp.BundledCassandraSpec.NodesNumber
@@ -722,9 +842,9 @@ func (r *CadenceReconciler) newCassandraSpec(cadence *v1beta1.Cadence, latestCas
}
dcName := models.CassandraChildDCName
- dcRegion := cadence.Spec.DataCentres[0].Region
- cloudProvider := cadence.Spec.DataCentres[0].CloudProvider
- providerAccountName := cadence.Spec.DataCentres[0].ProviderAccountName
+ dcRegion := c.Spec.DataCentres[0].Region
+ cloudProvider := c.Spec.DataCentres[0].CloudProvider
+ providerAccountName := c.Spec.DataCentres[0].ProviderAccountName
cassandraDataCentres := []*v1beta1.CassandraDataCentre{
{
@@ -743,7 +863,7 @@ func (r *CadenceReconciler) newCassandraSpec(cadence *v1beta1.Cadence, latestCas
}
spec := v1beta1.CassandraSpec{
Cluster: v1beta1.Cluster{
- Name: models.CassandraChildPrefix + cadence.Name,
+ Name: models.CassandraChildPrefix + c.Name,
Version: latestCassandraVersion,
SLATier: slaTier,
PrivateNetworkCluster: privateClusterNetwork,
@@ -762,10 +882,21 @@ func (r *CadenceReconciler) newCassandraSpec(cadence *v1beta1.Cadence, latestCas
}, nil
}
-func (r *CadenceReconciler) startClusterStatusJob(cadence *v1beta1.Cadence) error {
- job := r.newWatchStatusJob(cadence)
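+// startClusterOnPremisesIPsJob schedules the periodic job that watches the
+// on-premises nodes' IPs for the given Cadence cluster, reusing the regular
+// cluster status interval.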
+func (r *CadenceReconciler) startClusterOnPremisesIPsJob(c *v1beta1.Cadence, b *onPremisesBootstrap) error {
+ job := newWatchOnPremisesIPsJob(c.Kind, b)
+
+ err := r.Scheduler.ScheduleJob(c.GetJobID(scheduler.OnPremisesIPsChecker), scheduler.ClusterStatusInterval, job)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *CadenceReconciler) startClusterStatusJob(c *v1beta1.Cadence) error {
+ job := r.newWatchStatusJob(c)
- err := r.Scheduler.ScheduleJob(cadence.GetJobID(scheduler.StatusChecker), scheduler.ClusterStatusInterval, job)
+ err := r.Scheduler.ScheduleJob(c.GetJobID(scheduler.StatusChecker), scheduler.ClusterStatusInterval, job)
if err != nil {
return err
}
@@ -773,66 +904,66 @@ func (r *CadenceReconciler) startClusterStatusJob(cadence *v1beta1.Cadence) erro
return nil
}
-func (r *CadenceReconciler) newWatchStatusJob(cadence *v1beta1.Cadence) scheduler.Job {
+func (r *CadenceReconciler) newWatchStatusJob(c *v1beta1.Cadence) scheduler.Job {
l := log.Log.WithValues("component", "cadenceStatusClusterJob")
return func() error {
- namespacedName := client.ObjectKeyFromObject(cadence)
- err := r.Get(context.Background(), namespacedName, cadence)
+ namespacedName := client.ObjectKeyFromObject(c)
+ err := r.Get(context.Background(), namespacedName, c)
if k8serrors.IsNotFound(err) {
l.Info("Resource is not found in the k8s cluster. Closing Instaclustr status sync.",
"namespaced name", namespacedName)
- r.Scheduler.RemoveJob(cadence.GetJobID(scheduler.StatusChecker))
+ r.Scheduler.RemoveJob(c.GetJobID(scheduler.StatusChecker))
return nil
}
if err != nil {
l.Error(err, "Cannot get Cadence custom resource",
- "resource name", cadence.Name,
+ "resource name", c.Name,
)
return err
}
- iData, err := r.API.GetCadence(cadence.Status.ID)
+ iData, err := r.API.GetCadence(c.Status.ID)
if err != nil {
if errors.Is(err, instaclustr.NotFound) {
- if cadence.DeletionTimestamp != nil {
- _, err = r.HandleDeleteCluster(context.Background(), cadence, l)
+ if c.DeletionTimestamp != nil {
+ _, err = r.handleDeleteCluster(context.Background(), c, l)
return err
}
- return r.handleExternalDelete(context.Background(), cadence)
+ return r.handleExternalDelete(context.Background(), c)
}
l.Error(err, "Cannot get Cadence cluster from the Instaclustr API",
- "clusterID", cadence.Status.ID,
+ "clusterID", c.Status.ID,
)
return err
}
- iCadence, err := cadence.FromInstAPI(iData)
+ iCadence, err := c.FromInstAPI(iData)
if err != nil {
l.Error(err, "Cannot convert Cadence cluster from the Instaclustr API",
- "clusterID", cadence.Status.ID,
+ "clusterID", c.Status.ID,
)
return err
}
- if !areStatusesEqual(&iCadence.Status.ClusterStatus, &cadence.Status.ClusterStatus) ||
- !areSecondaryCadenceTargetsEqual(cadence.Status.TargetSecondaryCadence, iCadence.Status.TargetSecondaryCadence) {
+ if !areStatusesEqual(&iCadence.Status.ClusterStatus, &c.Status.ClusterStatus) ||
+ !areSecondaryCadenceTargetsEqual(c.Status.TargetSecondaryCadence, iCadence.Status.TargetSecondaryCadence) {
l.Info("Updating Cadence cluster status",
"new status", iCadence.Status.ClusterStatus,
- "old status", cadence.Status.ClusterStatus,
+ "old status", c.Status.ClusterStatus,
)
- areDCsEqual := areDataCentresEqual(iCadence.Status.ClusterStatus.DataCentres, cadence.Status.ClusterStatus.DataCentres)
+ areDCsEqual := areDataCentresEqual(iCadence.Status.ClusterStatus.DataCentres, c.Status.ClusterStatus.DataCentres)
- patch := cadence.NewPatch()
- cadence.Status.ClusterStatus = iCadence.Status.ClusterStatus
- cadence.Status.TargetSecondaryCadence = iCadence.Status.TargetSecondaryCadence
- err = r.Status().Patch(context.Background(), cadence, patch)
+ patch := c.NewPatch()
+ c.Status.ClusterStatus = iCadence.Status.ClusterStatus
+ c.Status.TargetSecondaryCadence = iCadence.Status.TargetSecondaryCadence
+ err = r.Status().Patch(context.Background(), c, patch)
if err != nil {
l.Error(err, "Cannot patch Cadence cluster",
- "cluster name", cadence.Spec.Name,
- "status", cadence.Status.State,
+ "cluster name", c.Spec.Name,
+ "status", c.Status.State,
)
return err
}
@@ -845,9 +976,9 @@ func (r *CadenceReconciler) newWatchStatusJob(cadence *v1beta1.Cadence) schedule
}
err = exposeservice.Create(r.Client,
- cadence.Name,
- cadence.Namespace,
- cadence.Spec.PrivateNetworkCluster,
+ c.Name,
+ c.Namespace,
+ c.Spec.PrivateNetworkCluster,
nodes,
models.CadenceConnectionPort)
if err != nil {
@@ -857,50 +988,50 @@ func (r *CadenceReconciler) newWatchStatusJob(cadence *v1beta1.Cadence) schedule
}
if iCadence.Status.CurrentClusterOperationStatus == models.NoOperation &&
- cadence.Annotations[models.ResourceStateAnnotation] != models.UpdatingEvent &&
- cadence.Annotations[models.UpdateQueuedAnnotation] != models.True &&
- !cadence.Spec.AreDCsEqual(iCadence.Spec.DataCentres) {
+ c.Annotations[models.ResourceStateAnnotation] != models.UpdatingEvent &&
+ c.Annotations[models.UpdateQueuedAnnotation] != models.True &&
+ !c.Spec.AreDCsEqual(iCadence.Spec.DataCentres) {
l.Info(msgExternalChanges,
"instaclustr data", iCadence.Spec.DataCentres,
- "k8s resource spec", cadence.Spec.DataCentres)
+ "k8s resource spec", c.Spec.DataCentres)
- patch := cadence.NewPatch()
- cadence.Annotations[models.ExternalChangesAnnotation] = models.True
+ patch := c.NewPatch()
+ c.Annotations[models.ExternalChangesAnnotation] = models.True
- err = r.Patch(context.Background(), cadence, patch)
+ err = r.Patch(context.Background(), c, patch)
if err != nil {
l.Error(err, "Cannot patch cluster cluster",
- "cluster name", cadence.Spec.Name, "cluster state", cadence.Status.State)
+ "cluster name", c.Spec.Name, "cluster state", c.Status.State)
return err
}
- msgDiffSpecs, err := createSpecDifferenceMessage(cadence.Spec.DataCentres, iCadence.Spec.DataCentres)
+ msgDiffSpecs, err := createSpecDifferenceMessage(c.Spec.DataCentres, iCadence.Spec.DataCentres)
if err != nil {
l.Error(err, "Cannot create specification difference message",
- "instaclustr data", iCadence.Spec, "k8s resource spec", cadence.Spec)
+ "instaclustr data", iCadence.Spec, "k8s resource spec", c.Spec)
return err
}
- r.EventRecorder.Eventf(cadence, models.Warning, models.ExternalChanges, msgDiffSpecs)
+ r.EventRecorder.Eventf(c, models.Warning, models.ExternalChanges, msgDiffSpecs)
}
//TODO: change all context.Background() and context.TODO() to ctx from Reconcile
- err = r.reconcileMaintenanceEvents(context.Background(), cadence)
+ err = r.reconcileMaintenanceEvents(context.Background(), c)
if err != nil {
l.Error(err, "Cannot reconcile cluster maintenance events",
- "cluster name", cadence.Spec.Name,
- "cluster ID", cadence.Status.ID,
+ "cluster name", c.Spec.Name,
+ "cluster ID", c.Status.ID,
)
return err
}
- if cadence.Status.State == models.RunningStatus && cadence.Status.CurrentClusterOperationStatus == models.OperationInProgress {
- patch := cadence.NewPatch()
- for _, dc := range cadence.Status.DataCentres {
+ if c.Status.State == models.RunningStatus && c.Status.CurrentClusterOperationStatus == models.OperationInProgress {
+ patch := c.NewPatch()
+ for _, dc := range c.Status.DataCentres {
resizeOperations, err := r.API.GetResizeOperationsByClusterDataCentreID(dc.ID)
if err != nil {
l.Error(err, "Cannot get data centre resize operations",
- "cluster name", cadence.Spec.Name,
- "cluster ID", cadence.Status.ID,
+ "cluster name", c.Spec.Name,
+ "cluster ID", c.Status.ID,
"data centre ID", dc.ID,
)
@@ -908,11 +1039,11 @@ func (r *CadenceReconciler) newWatchStatusJob(cadence *v1beta1.Cadence) schedule
}
dc.ResizeOperations = resizeOperations
- err = r.Status().Patch(context.Background(), cadence, patch)
+ err = r.Status().Patch(context.Background(), c, patch)
if err != nil {
l.Error(err, "Cannot patch data centre resize operations",
- "cluster name", cadence.Spec.Name,
- "cluster ID", cadence.Status.ID,
+ "cluster name", c.Spec.Name,
+ "cluster ID", c.Status.ID,
"data centre ID", dc.ID,
)
@@ -925,36 +1056,36 @@ func (r *CadenceReconciler) newWatchStatusJob(cadence *v1beta1.Cadence) schedule
}
}
-func (r *CadenceReconciler) newKafkaSpec(cadence *v1beta1.Cadence, latestKafkaVersion string) (*v1beta1.Kafka, error) {
+func (r *CadenceReconciler) newKafkaSpec(c *v1beta1.Cadence, latestKafkaVersion string) (*v1beta1.Kafka, error) {
typeMeta := v1.TypeMeta{
Kind: models.KafkaKind,
APIVersion: models.ClustersV1beta1APIVersion,
}
metadata := v1.ObjectMeta{
- Name: models.KafkaChildPrefix + cadence.Name,
- Labels: map[string]string{models.ControlledByLabel: cadence.Name},
+ Name: models.KafkaChildPrefix + c.Name,
+ Labels: map[string]string{models.ControlledByLabel: c.Name},
Annotations: map[string]string{models.ResourceStateAnnotation: models.CreatingEvent},
- Namespace: cadence.ObjectMeta.Namespace,
+ Namespace: c.ObjectMeta.Namespace,
Finalizers: []string{},
}
- if len(cadence.Spec.DataCentres) == 0 {
+ if len(c.Spec.DataCentres) == 0 {
return nil, models.ErrZeroDataCentres
}
var kafkaTFD []*v1beta1.TwoFactorDelete
- for _, cadenceTFD := range cadence.Spec.TwoFactorDelete {
+ for _, cadenceTFD := range c.Spec.TwoFactorDelete {
twoFactorDelete := &v1beta1.TwoFactorDelete{
Email: cadenceTFD.Email,
Phone: cadenceTFD.Phone,
}
kafkaTFD = append(kafkaTFD, twoFactorDelete)
}
- bundledKafkaSpec := cadence.Spec.PackagedProvisioning[0].BundledKafkaSpec
+ bundledKafkaSpec := c.Spec.PackagedProvisioning[0].BundledKafkaSpec
kafkaNetwork := bundledKafkaSpec.Network
- for _, cadenceDC := range cadence.Spec.DataCentres {
+ for _, cadenceDC := range c.Spec.DataCentres {
isKafkaNetworkOverlaps, err := cadenceDC.IsNetworkOverlaps(kafkaNetwork)
if err != nil {
return nil, err
@@ -967,9 +1098,9 @@ func (r *CadenceReconciler) newKafkaSpec(cadence *v1beta1.Cadence, latestKafkaVe
kafkaNodeSize := bundledKafkaSpec.NodeSize
kafkaNodesNumber := bundledKafkaSpec.NodesNumber
dcName := models.KafkaChildDCName
- dcRegion := cadence.Spec.DataCentres[0].Region
- cloudProvider := cadence.Spec.DataCentres[0].CloudProvider
- providerAccountName := cadence.Spec.DataCentres[0].ProviderAccountName
+ dcRegion := c.Spec.DataCentres[0].Region
+ cloudProvider := c.Spec.DataCentres[0].CloudProvider
+ providerAccountName := c.Spec.DataCentres[0].ProviderAccountName
kafkaDataCentres := []*v1beta1.KafkaDataCentre{
{
DataCentre: v1beta1.DataCentre{
@@ -984,13 +1115,13 @@ func (r *CadenceReconciler) newKafkaSpec(cadence *v1beta1.Cadence, latestKafkaVe
},
}
- slaTier := cadence.Spec.SLATier
- privateClusterNetwork := cadence.Spec.PrivateNetworkCluster
- pciCompliance := cadence.Spec.PCICompliance
- clientEncryption := cadence.Spec.DataCentres[0].ClientEncryption
+ slaTier := c.Spec.SLATier
+ privateClusterNetwork := c.Spec.PrivateNetworkCluster
+ pciCompliance := c.Spec.PCICompliance
+ clientEncryption := c.Spec.DataCentres[0].ClientEncryption
spec := v1beta1.KafkaSpec{
Cluster: v1beta1.Cluster{
- Name: models.KafkaChildPrefix + cadence.Name,
+ Name: models.KafkaChildPrefix + c.Name,
Version: latestKafkaVersion,
SLATier: slaTier,
PrivateNetworkCluster: privateClusterNetwork,
@@ -1013,25 +1144,25 @@ func (r *CadenceReconciler) newKafkaSpec(cadence *v1beta1.Cadence, latestKafkaVe
}, nil
}
-func (r *CadenceReconciler) newOpenSearchSpec(cadence *v1beta1.Cadence, oldestOpenSearchVersion string) (*v1beta1.OpenSearch, error) {
+func (r *CadenceReconciler) newOpenSearchSpec(c *v1beta1.Cadence, oldestOpenSearchVersion string) (*v1beta1.OpenSearch, error) {
typeMeta := v1.TypeMeta{
Kind: models.OpenSearchKind,
APIVersion: models.ClustersV1beta1APIVersion,
}
metadata := v1.ObjectMeta{
- Name: models.OpenSearchChildPrefix + cadence.Name,
- Labels: map[string]string{models.ControlledByLabel: cadence.Name},
+ Name: models.OpenSearchChildPrefix + c.Name,
+ Labels: map[string]string{models.ControlledByLabel: c.Name},
Annotations: map[string]string{models.ResourceStateAnnotation: models.CreatingEvent},
- Namespace: cadence.ObjectMeta.Namespace,
+ Namespace: c.ObjectMeta.Namespace,
Finalizers: []string{},
}
- if len(cadence.Spec.DataCentres) < 1 {
+ if len(c.Spec.DataCentres) < 1 {
return nil, models.ErrZeroDataCentres
}
- bundledOpenSearchSpec := cadence.Spec.PackagedProvisioning[0].BundledOpenSearchSpec
+ bundledOpenSearchSpec := c.Spec.PackagedProvisioning[0].BundledOpenSearchSpec
managerNodes := []*v1beta1.ClusterManagerNodes{{
NodeSize: bundledOpenSearchSpec.NodeSize,
@@ -1039,22 +1170,22 @@ func (r *CadenceReconciler) newOpenSearchSpec(cadence *v1beta1.Cadence, oldestOp
}}
osReplicationFactor := bundledOpenSearchSpec.ReplicationFactor
- slaTier := cadence.Spec.SLATier
- privateClusterNetwork := cadence.Spec.PrivateNetworkCluster
- pciCompliance := cadence.Spec.PCICompliance
+ slaTier := c.Spec.SLATier
+ privateClusterNetwork := c.Spec.PrivateNetworkCluster
+ pciCompliance := c.Spec.PCICompliance
var twoFactorDelete []*v1beta1.TwoFactorDelete
- if len(cadence.Spec.TwoFactorDelete) > 0 {
+ if len(c.Spec.TwoFactorDelete) > 0 {
twoFactorDelete = []*v1beta1.TwoFactorDelete{
{
- Email: cadence.Spec.TwoFactorDelete[0].Email,
- Phone: cadence.Spec.TwoFactorDelete[0].Phone,
+ Email: c.Spec.TwoFactorDelete[0].Email,
+ Phone: c.Spec.TwoFactorDelete[0].Phone,
},
}
}
osNetwork := bundledOpenSearchSpec.Network
- isOsNetworkOverlaps, err := cadence.Spec.DataCentres[0].IsNetworkOverlaps(osNetwork)
+ isOsNetworkOverlaps, err := c.Spec.DataCentres[0].IsNetworkOverlaps(osNetwork)
if err != nil {
return nil, err
}
@@ -1063,9 +1194,9 @@ func (r *CadenceReconciler) newOpenSearchSpec(cadence *v1beta1.Cadence, oldestOp
}
dcName := models.OpenSearchChildDCName
- dcRegion := cadence.Spec.DataCentres[0].Region
- cloudProvider := cadence.Spec.DataCentres[0].CloudProvider
- providerAccountName := cadence.Spec.DataCentres[0].ProviderAccountName
+ dcRegion := c.Spec.DataCentres[0].Region
+ cloudProvider := c.Spec.DataCentres[0].CloudProvider
+ providerAccountName := c.Spec.DataCentres[0].ProviderAccountName
osDataCentres := []*v1beta1.OpenSearchDataCentre{
{
@@ -1079,7 +1210,7 @@ func (r *CadenceReconciler) newOpenSearchSpec(cadence *v1beta1.Cadence, oldestOp
}
spec := v1beta1.OpenSearchSpec{
Cluster: v1beta1.Cluster{
- Name: models.OpenSearchChildPrefix + cadence.Name,
+ Name: models.OpenSearchChildPrefix + c.Name,
Version: oldestOpenSearchVersion,
SLATier: slaTier,
PrivateNetworkCluster: privateClusterNetwork,
@@ -1100,10 +1231,10 @@ func (r *CadenceReconciler) newOpenSearchSpec(cadence *v1beta1.Cadence, oldestOp
func (r *CadenceReconciler) deletePackagedResources(
ctx context.Context,
- cadence *v1beta1.Cadence,
+ c *v1beta1.Cadence,
packagedProvisioning *v1beta1.PackagedProvisioning,
) error {
- labelsToQuery := fmt.Sprintf("%s=%s", models.ControlledByLabel, cadence.Name)
+ labelsToQuery := fmt.Sprintf("%s=%s", models.ControlledByLabel, c.Name)
selector, err := labels.Parse(labelsToQuery)
if err != nil {
return err
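
For context on the ControlledByLabel queries used by preparePackagedSolution and deletePackagedResources above: the parsed selector is fed into controller-runtime List calls that fall outside the hunks shown here. A minimal sketch of that pattern, assuming the usual import path for the v1beta1 API package and the generated CassandraList type; the helper name is illustrative only:

package clusters

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/instaclustr/operator/apis/clusters/v1beta1"
	"github.com/instaclustr/operator/pkg/models"
)

// listControlledCassandras shows how the models.ControlledByLabel selector built
// in preparePackagedSolution and deletePackagedResources is typically consumed.
func listControlledCassandras(ctx context.Context, cl client.Client, c *v1beta1.Cadence) (*v1beta1.CassandraList, error) {
	selector, err := labels.Parse(fmt.Sprintf("%s=%s", models.ControlledByLabel, c.Name))
	if err != nil {
		return nil, err
	}

	children := &v1beta1.CassandraList{}
	err = cl.List(ctx, children, &client.ListOptions{
		LabelSelector: selector,
		Namespace:     c.Namespace,
	})
	if err != nil {
		return nil, err
	}

	return children, nil
}
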
diff --git a/controllers/clusters/cassandra_controller.go b/controllers/clusters/cassandra_controller.go
index c69128726..610cf7ea4 100644
--- a/controllers/clusters/cassandra_controller.go
+++ b/controllers/clusters/cassandra_controller.go
@@ -45,10 +45,6 @@ import (
"github.com/instaclustr/operator/pkg/scheduler"
)
-const (
- StatusRUNNING = "RUNNING"
-)
-
// CassandraReconciler reconciles a Cassandra object
type CassandraReconciler struct {
client.Client
@@ -62,8 +58,15 @@ type CassandraReconciler struct {
//+kubebuilder:rbac:groups=clusters.instaclustr.com,resources=cassandras/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=clusters.instaclustr.com,resources=cassandras/finalizers,verbs=update
//+kubebuilder:rbac:groups="",resources=events,verbs=create;patch
-//+kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups="",resources=endpoints,verbs=get;list;watch;create;update;patch;delete
+//+kubebuilder:rbac:groups=cdi.kubevirt.io,resources=datavolumes,verbs=get;list;watch;create;update;patch;delete;deletecollection
+//+kubebuilder:rbac:groups=kubevirt.io,resources=virtualmachines,verbs=get;list;watch;create;update;patch;delete;deletecollection
+//+kubebuilder:rbac:groups=kubevirt.io,resources=virtualmachineinstances,verbs=get;list;watch;create;update;patch;delete;deletecollection
+//+kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete;deletecollection
+//+kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete;deletecollection
+//+kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch;create;update;patch;delete
+//+kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete
+//+kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch;delete;deletecollection
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
@@ -73,8 +76,8 @@ type CassandraReconciler struct {
func (r *CassandraReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
l := log.FromContext(ctx)
- cassandra := &v1beta1.Cassandra{}
- err := r.Client.Get(ctx, req.NamespacedName, cassandra)
+ c := &v1beta1.Cassandra{}
+ err := r.Client.Get(ctx, req.NamespacedName, c)
if err != nil {
if k8serrors.IsNotFound(err) {
l.Info("Cassandra resource is not found",
@@ -87,23 +90,23 @@ func (r *CassandraReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
return reconcile.Result{}, err
}
- switch cassandra.Annotations[models.ResourceStateAnnotation] {
+ switch c.Annotations[models.ResourceStateAnnotation] {
case models.CreatingEvent:
- return r.handleCreateCluster(ctx, l, cassandra)
+ return r.handleCreateCluster(ctx, l, c)
case models.UpdatingEvent:
- return r.handleUpdateCluster(ctx, l, cassandra)
+ return r.handleUpdateCluster(ctx, l, c)
case models.DeletingEvent:
- return r.handleDeleteCluster(ctx, l, cassandra)
+ return r.handleDeleteCluster(ctx, l, c)
case models.GenericEvent:
l.Info("Event isn't handled",
- "cluster name", cassandra.Spec.Name,
+ "cluster name", c.Spec.Name,
"request", req,
- "event", cassandra.Annotations[models.ResourceStateAnnotation])
+ "event", c.Annotations[models.ResourceStateAnnotation])
return models.ExitReconcile, nil
default:
l.Info("Event isn't handled",
"request", req,
- "event", cassandra.Annotations[models.ResourceStateAnnotation])
+ "event", c.Annotations[models.ResourceStateAnnotation])
return models.ExitReconcile, nil
}
@@ -112,27 +115,27 @@ func (r *CassandraReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
func (r *CassandraReconciler) handleCreateCluster(
ctx context.Context,
l logr.Logger,
- cassandra *v1beta1.Cassandra,
+ c *v1beta1.Cassandra,
) (reconcile.Result, error) {
l = l.WithName("Cassandra creation event")
var err error
- patch := cassandra.NewPatch()
- if cassandra.Status.ID == "" {
+ patch := c.NewPatch()
+ if c.Status.ID == "" {
var id string
- if cassandra.Spec.HasRestore() {
+ if c.Spec.HasRestore() {
l.Info(
"Creating cluster from backup",
- "original cluster ID", cassandra.Spec.RestoreFrom.ClusterID,
+ "original cluster ID", c.Spec.RestoreFrom.ClusterID,
)
- id, err = r.API.RestoreCluster(cassandra.RestoreInfoToInstAPI(cassandra.Spec.RestoreFrom), models.CassandraAppKind)
+ id, err = r.API.RestoreCluster(c.RestoreInfoToInstAPI(c.Spec.RestoreFrom), models.CassandraAppKind)
if err != nil {
l.Error(err, "Cannot restore cluster from backup",
- "original cluster ID", cassandra.Spec.RestoreFrom.ClusterID,
+ "original cluster ID", c.Spec.RestoreFrom.ClusterID,
)
r.EventRecorder.Eventf(
- cassandra, models.Warning, models.CreationFailed,
+ c, models.Warning, models.CreationFailed,
"Cluster restore from backup on the Instaclustr is failed. Reason: %v",
err,
)
@@ -141,26 +144,26 @@ func (r *CassandraReconciler) handleCreateCluster(
}
r.EventRecorder.Eventf(
- cassandra, models.Normal, models.Created,
+ c, models.Normal, models.Created,
"Cluster restore request is sent. Original cluster ID: %s, new cluster ID: %s",
- cassandra.Spec.RestoreFrom.ClusterID,
+ c.Spec.RestoreFrom.ClusterID,
id,
)
} else {
l.Info(
"Creating cluster",
- "cluster name", cassandra.Spec.Name,
- "data centres", cassandra.Spec.DataCentres,
+ "cluster name", c.Spec.Name,
+ "data centres", c.Spec.DataCentres,
)
- id, err = r.API.CreateCluster(instaclustr.CassandraEndpoint, cassandra.Spec.ToInstAPI())
+ id, err = r.API.CreateCluster(instaclustr.CassandraEndpoint, c.Spec.ToInstAPI())
if err != nil {
l.Error(
err, "Cannot create cluster",
- "cluster spec", cassandra.Spec,
+ "cluster spec", c.Spec,
)
r.EventRecorder.Eventf(
- cassandra, models.Warning, models.CreationFailed,
+ c, models.Warning, models.CreationFailed,
"Cluster creation on the Instaclustr is failed. Reason: %v",
err,
)
@@ -168,45 +171,45 @@ func (r *CassandraReconciler) handleCreateCluster(
}
r.EventRecorder.Eventf(
- cassandra, models.Normal, models.Created,
+ c, models.Normal, models.Created,
"Cluster creation request is sent. Cluster ID: %s",
id,
)
}
- cassandra.Status.ID = id
- err = r.Status().Patch(ctx, cassandra, patch)
+ c.Status.ID = id
+ err = r.Status().Patch(ctx, c, patch)
if err != nil {
l.Error(err, "Cannot patch cluster status",
- "cluster name", cassandra.Spec.Name,
- "cluster ID", cassandra.Status.ID,
- "kind", cassandra.Kind,
- "api Version", cassandra.APIVersion,
- "namespace", cassandra.Namespace,
- "cluster metadata", cassandra.ObjectMeta,
+ "cluster name", c.Spec.Name,
+ "cluster ID", c.Status.ID,
+ "kind", c.Kind,
+ "api Version", c.APIVersion,
+ "namespace", c.Namespace,
+ "cluster metadata", c.ObjectMeta,
)
r.EventRecorder.Eventf(
- cassandra, models.Warning, models.PatchFailed,
+ c, models.Warning, models.PatchFailed,
"Cluster resource status patch is failed. Reason: %v",
err,
)
return reconcile.Result{}, err
}
- controllerutil.AddFinalizer(cassandra, models.DeletionFinalizer)
- cassandra.Annotations[models.ResourceStateAnnotation] = models.CreatedEvent
- err = r.Patch(ctx, cassandra, patch)
+ controllerutil.AddFinalizer(c, models.DeletionFinalizer)
+ c.Annotations[models.ResourceStateAnnotation] = models.CreatedEvent
+ err = r.Patch(ctx, c, patch)
if err != nil {
l.Error(err, "Cannot patch cluster",
- "cluster name", cassandra.Spec.Name,
- "cluster ID", cassandra.Status.ID,
- "kind", cassandra.Kind,
- "api Version", cassandra.APIVersion,
- "namespace", cassandra.Namespace,
- "cluster metadata", cassandra.ObjectMeta,
+ "cluster name", c.Spec.Name,
+ "cluster ID", c.Status.ID,
+ "kind", c.Kind,
+ "api Version", c.APIVersion,
+ "namespace", c.Namespace,
+ "cluster metadata", c.ObjectMeta,
)
r.EventRecorder.Eventf(
- cassandra, models.Warning, models.PatchFailed,
+ c, models.Warning, models.PatchFailed,
"Cluster resource patch is failed. Reason: %v",
err,
)
@@ -215,22 +218,22 @@ func (r *CassandraReconciler) handleCreateCluster(
l.Info(
"Cluster has been created",
- "cluster name", cassandra.Spec.Name,
- "cluster ID", cassandra.Status.ID,
- "kind", cassandra.Kind,
- "api Version", cassandra.APIVersion,
- "namespace", cassandra.Namespace,
+ "cluster name", c.Spec.Name,
+ "cluster ID", c.Status.ID,
+ "kind", c.Kind,
+ "api Version", c.APIVersion,
+ "namespace", c.Namespace,
)
}
- if cassandra.Status.State != models.DeletedStatus {
- err = r.startClusterStatusJob(cassandra)
+ if c.Status.State != models.DeletedStatus {
+ err = r.startClusterStatusJob(c)
if err != nil {
l.Error(err, "Cannot start cluster status job",
- "cassandra cluster ID", cassandra.Status.ID)
+ "c cluster ID", c.Status.ID)
r.EventRecorder.Eventf(
- cassandra, models.Warning, models.CreationFailed,
+ c, models.Warning, models.CreationFailed,
"Cluster status check job is failed. Reason: %v",
err,
)
@@ -238,41 +241,118 @@ func (r *CassandraReconciler) handleCreateCluster(
}
r.EventRecorder.Eventf(
- cassandra, models.Normal, models.Created,
+ c, models.Normal, models.Created,
"Cluster status check job is started",
)
+ }
+ if c.Spec.OnPremisesSpec != nil {
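+ // Same on-premises bootstrap flow as in the Cadence controller; for on-premises
+ // clusters the backups and user-creation jobs below are not started.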
+ iData, err := r.API.GetCassandra(c.Status.ID)
+ if err != nil {
+ l.Error(err, "Cannot get cluster from the Instaclustr API",
+ "cluster name", c.Spec.Name,
+ "data centres", c.Spec.DataCentres,
+ "cluster ID", c.Status.ID,
+ )
+ r.EventRecorder.Eventf(
+ c, models.Warning, models.FetchFailed,
+ "Cluster fetch from the Instaclustr API is failed. Reason: %v",
+ err,
+ )
+ return reconcile.Result{}, err
+ }
+ iCassandra, err := c.FromInstAPI(iData)
+ if err != nil {
+ l.Error(
+ err, "Cannot convert cluster from the Instaclustr API",
+ "cluster name", c.Spec.Name,
+ "cluster ID", c.Status.ID,
+ )
+ r.EventRecorder.Eventf(
+ c, models.Warning, models.ConversionFailed,
+ "Cluster convertion from the Instaclustr API to k8s resource is failed. Reason: %v",
+ err,
+ )
+ return reconcile.Result{}, err
+ }
+
+ bootstrap := newOnPremisesBootstrap(
+ r.Client,
+ c,
+ r.EventRecorder,
+ iCassandra.Status.ClusterStatus,
+ c.Spec.OnPremisesSpec,
+ newExposePorts(c.GetExposePorts()),
+ c.GetHeadlessPorts(),
+ c.Spec.PrivateNetworkCluster,
+ )
+
+ err = handleCreateOnPremisesClusterResources(ctx, bootstrap)
+ if err != nil {
+ l.Error(
+ err, "Cannot create resources for on-premises cluster",
+ "cluster spec", c.Spec.OnPremisesSpec,
+ )
+ r.EventRecorder.Eventf(
+ c, models.Warning, models.CreationFailed,
+ "Resources creation for on-premises cluster is failed. Reason: %v",
+ err,
+ )
+ return reconcile.Result{}, err
+ }
- err = r.startClusterBackupsJob(cassandra)
+ err = r.startClusterOnPremisesIPsJob(c, bootstrap)
if err != nil {
- l.Error(err, "Cannot start cluster backups check job",
- "cluster ID", cassandra.Status.ID,
+ l.Error(err, "Cannot start on-premises cluster IPs check job",
+ "cluster ID", c.Status.ID,
)
r.EventRecorder.Eventf(
- cassandra, models.Warning, models.CreationFailed,
- "Cluster backups check job is failed. Reason: %v",
+ c, models.Warning, models.CreationFailed,
+ "On-premises cluster IPs check job is failed. Reason: %v",
err,
)
return reconcile.Result{}, err
}
+ l.Info(
+ "On-premises resources have been created",
+ "cluster name", c.Spec.Name,
+ "on-premises Spec", c.Spec.OnPremisesSpec,
+ "cluster ID", c.Status.ID,
+ )
+ return models.ExitReconcile, nil
+ }
+
+ err = r.startClusterBackupsJob(c)
+ if err != nil {
+ l.Error(err, "Cannot start cluster backups check job",
+ "cluster ID", c.Status.ID,
+ )
+
r.EventRecorder.Eventf(
- cassandra, models.Normal, models.Created,
- "Cluster backups check job is started",
+ c, models.Warning, models.CreationFailed,
+ "Cluster backups check job is failed. Reason: %v",
+ err,
)
+ return reconcile.Result{}, err
+ }
- if cassandra.Spec.UserRefs != nil && cassandra.Status.AvailableUsers == nil {
- err = r.startUsersCreationJob(cassandra)
- if err != nil {
- l.Error(err, "Failed to start user creation job")
- r.EventRecorder.Eventf(cassandra, models.Warning, models.CreationFailed,
- "User creation job is failed. Reason: %v", err)
- return reconcile.Result{}, err
- }
+ r.EventRecorder.Eventf(
+ c, models.Normal, models.Created,
+ "Cluster backups check job is started",
+ )
- r.EventRecorder.Event(cassandra, models.Normal, models.Created,
- "Cluster user creation job is started")
+ if c.Spec.UserRefs != nil && c.Status.AvailableUsers == nil {
+ err = r.startUsersCreationJob(c)
+ if err != nil {
+ l.Error(err, "Failed to start user creation job")
+ r.EventRecorder.Eventf(c, models.Warning, models.CreationFailed,
+ "User creation job is failed. Reason: %v", err)
+ return reconcile.Result{}, err
}
+
+ r.EventRecorder.Event(c, models.Normal, models.Created,
+ "Cluster user creation job is started")
}
return models.ExitReconcile, nil
@@ -281,99 +361,99 @@ func (r *CassandraReconciler) handleCreateCluster(
func (r *CassandraReconciler) handleUpdateCluster(
ctx context.Context,
l logr.Logger,
- cassandra *v1beta1.Cassandra,
+ c *v1beta1.Cassandra,
) (reconcile.Result, error) {
l = l.WithName("Cassandra update event")
- iData, err := r.API.GetCassandra(cassandra.Status.ID)
+ iData, err := r.API.GetCassandra(c.Status.ID)
if err != nil {
l.Error(err, "Cannot get cluster from the Instaclustr API",
- "cluster name", cassandra.Spec.Name,
- "cluster ID", cassandra.Status.ID,
+ "cluster name", c.Spec.Name,
+ "cluster ID", c.Status.ID,
)
r.EventRecorder.Eventf(
- cassandra, models.Warning, models.FetchFailed,
+ c, models.Warning, models.FetchFailed,
"Cluster fetch from the Instaclustr API is failed. Reason: %v",
err,
)
return reconcile.Result{}, err
}
- iCassandra, err := cassandra.FromInstAPI(iData)
+ iCassandra, err := c.FromInstAPI(iData)
if err != nil {
l.Error(
err, "Cannot convert cluster from the Instaclustr API",
- "cluster name", cassandra.Spec.Name,
- "cluster ID", cassandra.Status.ID,
+ "cluster name", c.Spec.Name,
+ "cluster ID", c.Status.ID,
)
r.EventRecorder.Eventf(
- cassandra, models.Warning, models.ConvertionFailed,
+ c, models.Warning, models.ConversionFailed,
"Cluster convertion from the Instaclustr API to k8s resource is failed. Reason: %v",
err,
)
return reconcile.Result{}, err
}
- if cassandra.Annotations[models.ExternalChangesAnnotation] == models.True {
- return r.handleExternalChanges(cassandra, iCassandra, l)
+ if c.Annotations[models.ExternalChangesAnnotation] == models.True {
+ return r.handleExternalChanges(c, iCassandra, l)
}
- patch := cassandra.NewPatch()
+ patch := c.NewPatch()
- if cassandra.Spec.ClusterSettingsNeedUpdate(iCassandra.Spec.Cluster) {
+ if c.Spec.ClusterSettingsNeedUpdate(iCassandra.Spec.Cluster) {
l.Info("Updating cluster settings",
"instaclustr description", iCassandra.Spec.Description,
"instaclustr two factor delete", iCassandra.Spec.TwoFactorDelete)
- err = r.API.UpdateClusterSettings(cassandra.Status.ID, cassandra.Spec.ClusterSettingsUpdateToInstAPI())
+ err = r.API.UpdateClusterSettings(c.Status.ID, c.Spec.ClusterSettingsUpdateToInstAPI())
if err != nil {
l.Error(err, "Cannot update cluster settings",
- "cluster ID", cassandra.Status.ID, "cluster spec", cassandra.Spec)
- r.EventRecorder.Eventf(cassandra, models.Warning, models.UpdateFailed,
+ "cluster ID", c.Status.ID, "cluster spec", c.Spec)
+ r.EventRecorder.Eventf(c, models.Warning, models.UpdateFailed,
"Cannot update cluster settings. Reason: %v", err)
return reconcile.Result{}, err
}
}
- if !cassandra.Spec.AreDCsEqual(iCassandra.Spec.DataCentres) {
+ if !c.Spec.AreDCsEqual(iCassandra.Spec.DataCentres) {
l.Info("Update request to Instaclustr API has been sent",
- "spec data centres", cassandra.Spec.DataCentres,
- "resize settings", cassandra.Spec.ResizeSettings,
+ "spec data centres", c.Spec.DataCentres,
+ "resize settings", c.Spec.ResizeSettings,
)
- err = r.API.UpdateCassandra(cassandra.Status.ID, cassandra.Spec.DCsUpdateToInstAPI())
+ err = r.API.UpdateCassandra(c.Status.ID, c.Spec.DCsUpdateToInstAPI())
if err != nil {
l.Error(err, "Cannot update cluster",
- "cluster ID", cassandra.Status.ID,
- "cluster name", cassandra.Spec.Name,
- "cluster spec", cassandra.Spec,
- "cluster state", cassandra.Status.State,
+ "cluster ID", c.Status.ID,
+ "cluster name", c.Spec.Name,
+ "cluster spec", c.Spec,
+ "cluster state", c.Status.State,
)
r.EventRecorder.Eventf(
- cassandra, models.Warning, models.UpdateFailed,
+ c, models.Warning, models.UpdateFailed,
"Cluster update on the Instaclustr API is failed. Reason: %v",
err,
)
if errors.Is(err, instaclustr.ClusterIsNotReadyToResize) {
- patch := cassandra.NewPatch()
- cassandra.Annotations[models.UpdateQueuedAnnotation] = models.True
- err = r.Patch(ctx, cassandra, patch)
+ patch := c.NewPatch()
+ c.Annotations[models.UpdateQueuedAnnotation] = models.True
+ err = r.Patch(ctx, c, patch)
if err != nil {
l.Error(err, "Cannot patch cluster resource",
- "cluster name", cassandra.Spec.Name,
- "cluster ID", cassandra.Status.ID,
- "kind", cassandra.Kind,
- "api Version", cassandra.APIVersion,
- "namespace", cassandra.Namespace,
- "cluster metadata", cassandra.ObjectMeta,
+ "cluster name", c.Spec.Name,
+ "cluster ID", c.Status.ID,
+ "kind", c.Kind,
+ "api Version", c.APIVersion,
+ "namespace", c.Namespace,
+ "cluster metadata", c.ObjectMeta,
)
r.EventRecorder.Eventf(
- cassandra, models.Warning, models.PatchFailed,
+ c, models.Warning, models.PatchFailed,
"Cluster resource patch is failed. Reason: %v",
err,
)
@@ -385,29 +465,29 @@ func (r *CassandraReconciler) handleUpdateCluster(
}
}
- err = handleUsersChanges(ctx, r.Client, r, cassandra)
+ err = handleUsersChanges(ctx, r.Client, r, c)
if err != nil {
l.Error(err, "Failed to handle users changes")
- r.EventRecorder.Eventf(cassandra, models.Warning, models.PatchFailed,
+ r.EventRecorder.Eventf(c, models.Warning, models.PatchFailed,
"Handling users changes is failed. Reason: %w", err,
)
return reconcile.Result{}, err
}
- cassandra.Annotations[models.ResourceStateAnnotation] = models.UpdatedEvent
- cassandra.Annotations[models.UpdateQueuedAnnotation] = ""
- err = r.Patch(ctx, cassandra, patch)
+ c.Annotations[models.ResourceStateAnnotation] = models.UpdatedEvent
+ c.Annotations[models.UpdateQueuedAnnotation] = ""
+ err = r.Patch(ctx, c, patch)
if err != nil {
l.Error(err, "Cannot patch cluster resource",
- "cluster name", cassandra.Spec.Name,
- "cluster ID", cassandra.Status.ID,
- "kind", cassandra.Kind,
- "api Version", cassandra.APIVersion,
- "namespace", cassandra.Namespace,
- "cluster metadata", cassandra.ObjectMeta,
+ "cluster name", c.Spec.Name,
+ "cluster ID", c.Status.ID,
+ "kind", c.Kind,
+ "api Version", c.APIVersion,
+ "namespace", c.Namespace,
+ "cluster metadata", c.ObjectMeta,
)
r.EventRecorder.Eventf(
- cassandra, models.Warning, models.PatchFailed,
+ c, models.Warning, models.PatchFailed,
"Cluster resource patch is failed. Reason: %v",
err,
)
@@ -416,49 +496,49 @@ func (r *CassandraReconciler) handleUpdateCluster(
l.Info(
"Cluster has been updated",
- "cluster name", cassandra.Spec.Name,
- "cluster ID", cassandra.Status.ID,
- "data centres", cassandra.Spec.DataCentres,
+ "cluster name", c.Spec.Name,
+ "cluster ID", c.Status.ID,
+ "data centres", c.Spec.DataCentres,
)
return models.ExitReconcile, nil
}
-func (r *CassandraReconciler) handleExternalChanges(cassandra, iCassandra *v1beta1.Cassandra, l logr.Logger) (reconcile.Result, error) {
- if !cassandra.Spec.IsEqual(iCassandra.Spec) {
+func (r *CassandraReconciler) handleExternalChanges(c, iCassandra *v1beta1.Cassandra, l logr.Logger) (reconcile.Result, error) {
+ if !c.Spec.IsEqual(iCassandra.Spec) {
l.Info(msgSpecStillNoMatch,
- "specification of k8s resource", cassandra.Spec,
+ "specification of k8s resource", c.Spec,
"data from Instaclustr ", iCassandra.Spec)
- msgDiffSpecs, err := createSpecDifferenceMessage(cassandra.Spec, iCassandra.Spec)
+ msgDiffSpecs, err := createSpecDifferenceMessage(c.Spec, iCassandra.Spec)
if err != nil {
l.Error(err, "Cannot create specification difference message",
- "instaclustr data", iCassandra.Spec, "k8s resource spec", cassandra.Spec)
+ "instaclustr data", iCassandra.Spec, "k8s resource spec", c.Spec)
return models.ExitReconcile, nil
}
- r.EventRecorder.Eventf(cassandra, models.Warning, models.ExternalChanges, msgDiffSpecs)
+ r.EventRecorder.Eventf(c, models.Warning, models.ExternalChanges, msgDiffSpecs)
return models.ExitReconcile, nil
}
- patch := cassandra.NewPatch()
+ patch := c.NewPatch()
- cassandra.Annotations[models.ExternalChangesAnnotation] = ""
+ c.Annotations[models.ExternalChangesAnnotation] = ""
- err := r.Patch(context.Background(), cassandra, patch)
+ err := r.Patch(context.Background(), c, patch)
if err != nil {
l.Error(err, "Cannot patch cluster resource",
- "cluster name", cassandra.Spec.Name, "cluster ID", cassandra.Status.ID)
+ "cluster name", c.Spec.Name, "cluster ID", c.Status.ID)
- r.EventRecorder.Eventf(cassandra, models.Warning, models.PatchFailed,
+ r.EventRecorder.Eventf(c, models.Warning, models.PatchFailed,
"Cluster resource patch is failed. Reason: %v", err)
return reconcile.Result{}, err
}
- l.Info("External changes have been reconciled", "resource ID", cassandra.Status.ID)
- r.EventRecorder.Event(cassandra, models.Normal, models.ExternalChanges, "External changes have been reconciled")
+ l.Info("External changes have been reconciled", "resource ID", c.Status.ID)
+ r.EventRecorder.Event(c, models.Normal, models.ExternalChanges, "External changes have been reconciled")
return models.ExitReconcile, nil
}
@@ -466,91 +546,108 @@ func (r *CassandraReconciler) handleExternalChanges(cassandra, iCassandra *v1bet
func (r *CassandraReconciler) handleDeleteCluster(
ctx context.Context,
l logr.Logger,
- cassandra *v1beta1.Cassandra,
+ c *v1beta1.Cassandra,
) (reconcile.Result, error) {
l = l.WithName("Cassandra deletion event")
- _, err := r.API.GetCassandra(cassandra.Status.ID)
+ _, err := r.API.GetCassandra(c.Status.ID)
if err != nil && !errors.Is(err, instaclustr.NotFound) {
l.Error(
err, "Cannot get cluster from the Instaclustr API",
- "cluster name", cassandra.Spec.Name,
- "cluster ID", cassandra.Status.ID,
- "kind", cassandra.Kind,
- "api Version", cassandra.APIVersion,
- "namespace", cassandra.Namespace,
+ "cluster name", c.Spec.Name,
+ "cluster ID", c.Status.ID,
+ "kind", c.Kind,
+ "api Version", c.APIVersion,
+ "namespace", c.Namespace,
)
r.EventRecorder.Eventf(
- cassandra, models.Warning, models.FetchFailed,
+ c, models.Warning, models.FetchFailed,
"Cluster fetch from the Instaclustr API is failed. Reason: %v",
err,
)
return reconcile.Result{}, err
}
- patch := cassandra.NewPatch()
+ patch := c.NewPatch()
if !errors.Is(err, instaclustr.NotFound) {
l.Info("Sending cluster deletion to the Instaclustr API",
- "cluster name", cassandra.Spec.Name,
- "cluster ID", cassandra.Status.ID)
+ "cluster name", c.Spec.Name,
+ "cluster ID", c.Status.ID)
- err = r.API.DeleteCluster(cassandra.Status.ID, instaclustr.CassandraEndpoint)
+ err = r.API.DeleteCluster(c.Status.ID, instaclustr.CassandraEndpoint)
if err != nil {
l.Error(err, "Cannot delete cluster",
- "cluster name", cassandra.Spec.Name,
- "state", cassandra.Status.State,
- "kind", cassandra.Kind,
- "api Version", cassandra.APIVersion,
- "namespace", cassandra.Namespace,
+ "cluster name", c.Spec.Name,
+ "state", c.Status.State,
+ "kind", c.Kind,
+ "api Version", c.APIVersion,
+ "namespace", c.Namespace,
)
r.EventRecorder.Eventf(
- cassandra, models.Warning, models.DeletionFailed,
+ c, models.Warning, models.DeletionFailed,
"Cluster deletion on the Instaclustr API is failed. Reason: %v",
err,
)
return reconcile.Result{}, err
}
- r.EventRecorder.Event(cassandra, models.Normal, models.DeletionStarted,
+ r.EventRecorder.Event(c, models.Normal, models.DeletionStarted,
"Cluster deletion request is sent to the Instaclustr API.")
- if cassandra.Spec.TwoFactorDelete != nil {
- cassandra.Annotations[models.ResourceStateAnnotation] = models.UpdatedEvent
- cassandra.Annotations[models.ClusterDeletionAnnotation] = models.Triggered
- err = r.Patch(ctx, cassandra, patch)
+ if c.Spec.TwoFactorDelete != nil {
+ c.Annotations[models.ResourceStateAnnotation] = models.UpdatedEvent
+ c.Annotations[models.ClusterDeletionAnnotation] = models.Triggered
+ err = r.Patch(ctx, c, patch)
if err != nil {
l.Error(err, "Cannot patch cluster resource",
- "cluster name", cassandra.Spec.Name,
- "cluster state", cassandra.Status.State)
- r.EventRecorder.Eventf(cassandra, models.Warning, models.PatchFailed,
+ "cluster name", c.Spec.Name,
+ "cluster state", c.Status.State)
+ r.EventRecorder.Eventf(c, models.Warning, models.PatchFailed,
"Cluster resource patch is failed. Reason: %v", err)
return reconcile.Result{}, err
}
- l.Info(msgDeleteClusterWithTwoFactorDelete, "cluster ID", cassandra.Status.ID)
+ l.Info(msgDeleteClusterWithTwoFactorDelete, "cluster ID", c.Status.ID)
- r.EventRecorder.Event(cassandra, models.Normal, models.DeletionStarted,
+ r.EventRecorder.Event(c, models.Normal, models.DeletionStarted,
"Two-Factor Delete is enabled, please confirm cluster deletion via email or phone.")
return models.ExitReconcile, nil
}
}
- r.Scheduler.RemoveJob(cassandra.GetJobID(scheduler.UserCreator))
- r.Scheduler.RemoveJob(cassandra.GetJobID(scheduler.BackupsChecker))
- r.Scheduler.RemoveJob(cassandra.GetJobID(scheduler.StatusChecker))
+ r.Scheduler.RemoveJob(c.GetJobID(scheduler.UserCreator))
+ r.Scheduler.RemoveJob(c.GetJobID(scheduler.BackupsChecker))
+ r.Scheduler.RemoveJob(c.GetJobID(scheduler.StatusChecker))
+
+ if c.Spec.OnPremisesSpec != nil {
+ err = deleteOnPremResources(ctx, r.Client, c.Status.ID, c.Namespace)
+ if err != nil {
+ l.Error(err, "Cannot delete cluster on-premises resources",
+ "cluster ID", c.Status.ID)
+ r.EventRecorder.Eventf(c, models.Warning, models.DeletionFailed,
+ "Cluster on-premises resources deletion is failed. Reason: %v", err)
+ return reconcile.Result{}, err
+ }
- l.Info("Deleting cluster backup resources", "cluster ID", cassandra.Status.ID)
+ l.Info("Cluster on-premises resources are deleted",
+ "cluster ID", c.Status.ID)
+ r.EventRecorder.Eventf(c, models.Normal, models.Deleted,
+ "Cluster on-premises resources are deleted")
+ r.Scheduler.RemoveJob(c.GetJobID(scheduler.OnPremisesIPsChecker))
+ }
- err = r.deleteBackups(ctx, cassandra.Status.ID, cassandra.Namespace)
+ l.Info("Deleting cluster backup resources", "cluster ID", c.Status.ID)
+
+ err = r.deleteBackups(ctx, c.Status.ID, c.Namespace)
if err != nil {
l.Error(err, "Cannot delete cluster backup resources",
- "cluster ID", cassandra.Status.ID,
+ "cluster ID", c.Status.ID,
)
r.EventRecorder.Eventf(
- cassandra, models.Warning, models.DeletionFailed,
+ c, models.Warning, models.DeletionFailed,
"Cluster backups deletion is failed. Reason: %v",
err,
)
@@ -558,72 +655,72 @@ func (r *CassandraReconciler) handleDeleteCluster(
}
l.Info("Cluster backup resources were deleted",
- "cluster ID", cassandra.Status.ID,
+ "cluster ID", c.Status.ID,
)
r.EventRecorder.Eventf(
- cassandra, models.Normal, models.Deleted,
+ c, models.Normal, models.Deleted,
"Cluster backup resources are deleted",
)
- err = detachUsers(ctx, r.Client, r, cassandra)
+ err = detachUsers(ctx, r.Client, r, c)
if err != nil {
l.Error(err, "Failed to detach users from the cluster")
- r.EventRecorder.Eventf(cassandra, models.Warning, models.DeletionFailed,
+ r.EventRecorder.Eventf(c, models.Warning, models.DeletionFailed,
"Detaching users from the cluster is failed. Reason: %w", err,
)
return reconcile.Result{}, err
}
- controllerutil.RemoveFinalizer(cassandra, models.DeletionFinalizer)
- cassandra.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent
- err = r.Patch(ctx, cassandra, patch)
+ controllerutil.RemoveFinalizer(c, models.DeletionFinalizer)
+ c.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent
+ err = r.Patch(ctx, c, patch)
if err != nil {
l.Error(err, "Cannot patch cluster resource",
- "cluster name", cassandra.Spec.Name,
- "cluster ID", cassandra.Status.ID,
- "kind", cassandra.Kind,
- "api Version", cassandra.APIVersion,
- "namespace", cassandra.Namespace,
- "cluster metadata", cassandra.ObjectMeta,
+ "cluster name", c.Spec.Name,
+ "cluster ID", c.Status.ID,
+ "kind", c.Kind,
+ "api Version", c.APIVersion,
+ "namespace", c.Namespace,
+ "cluster metadata", c.ObjectMeta,
)
r.EventRecorder.Eventf(
- cassandra, models.Warning, models.PatchFailed,
+ c, models.Warning, models.PatchFailed,
"Cluster resource patch is failed. Reason: %v",
err,
)
return reconcile.Result{}, err
}
- err = exposeservice.Delete(r.Client, cassandra.Name, cassandra.Namespace)
+ err = exposeservice.Delete(r.Client, c.Name, c.Namespace)
if err != nil {
l.Error(err, "Cannot delete Cassandra cluster expose service",
- "cluster ID", cassandra.Status.ID,
- "cluster name", cassandra.Spec.Name,
+ "cluster ID", c.Status.ID,
+ "cluster name", c.Spec.Name,
)
return reconcile.Result{}, err
}
l.Info("Cluster has been deleted",
- "cluster name", cassandra.Spec.Name,
- "cluster ID", cassandra.Status.ID,
- "kind", cassandra.Kind,
- "api Version", cassandra.APIVersion)
+ "cluster name", c.Spec.Name,
+ "cluster ID", c.Status.ID,
+ "kind", c.Kind,
+ "api Version", c.APIVersion)
r.EventRecorder.Eventf(
- cassandra, models.Normal, models.Deleted,
+ c, models.Normal, models.Deleted,
"Cluster resource is deleted",
)
return models.ExitReconcile, nil
}
-func (r *CassandraReconciler) startClusterStatusJob(cassandraCluster *v1beta1.Cassandra) error {
- job := r.newWatchStatusJob(cassandraCluster)
+func (r *CassandraReconciler) startClusterStatusJob(c *v1beta1.Cassandra) error {
+ job := r.newWatchStatusJob(c)
- err := r.Scheduler.ScheduleJob(cassandraCluster.GetJobID(scheduler.StatusChecker), scheduler.ClusterStatusInterval, job)
+ err := r.Scheduler.ScheduleJob(c.GetJobID(scheduler.StatusChecker), scheduler.ClusterStatusInterval, job)
if err != nil {
return err
}
@@ -653,55 +750,66 @@ func (r *CassandraReconciler) startUsersCreationJob(cluster *v1beta1.Cassandra)
return nil
}
-func (r *CassandraReconciler) newWatchStatusJob(cassandra *v1beta1.Cassandra) scheduler.Job {
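+// startClusterOnPremisesIPsJob schedules the periodic on-premises node IPs check job for the cluster.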
+func (r *CassandraReconciler) startClusterOnPremisesIPsJob(c *v1beta1.Cassandra, b *onPremisesBootstrap) error {
+ job := newWatchOnPremisesIPsJob(c.Kind, b)
+
+ err := r.Scheduler.ScheduleJob(c.GetJobID(scheduler.OnPremisesIPsChecker), scheduler.ClusterStatusInterval, job)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *CassandraReconciler) newWatchStatusJob(c *v1beta1.Cassandra) scheduler.Job {
l := log.Log.WithValues("component", "CassandraStatusClusterJob")
return func() error {
- namespacedName := client.ObjectKeyFromObject(cassandra)
- err := r.Get(context.Background(), namespacedName, cassandra)
+ namespacedName := client.ObjectKeyFromObject(c)
+ err := r.Get(context.Background(), namespacedName, c)
if k8serrors.IsNotFound(err) {
l.Info("Resource is not found in the k8s cluster. Closing Instaclustr status sync.",
"namespaced name", namespacedName)
- r.Scheduler.RemoveJob(cassandra.GetJobID(scheduler.BackupsChecker))
- r.Scheduler.RemoveJob(cassandra.GetJobID(scheduler.UserCreator))
- r.Scheduler.RemoveJob(cassandra.GetJobID(scheduler.StatusChecker))
+ r.Scheduler.RemoveJob(c.GetJobID(scheduler.BackupsChecker))
+ r.Scheduler.RemoveJob(c.GetJobID(scheduler.UserCreator))
+ r.Scheduler.RemoveJob(c.GetJobID(scheduler.StatusChecker))
return nil
}
- iData, err := r.API.GetCassandra(cassandra.Status.ID)
+ iData, err := r.API.GetCassandra(c.Status.ID)
if err != nil {
if errors.Is(err, instaclustr.NotFound) {
- if cassandra.DeletionTimestamp != nil {
- _, err = r.handleDeleteCluster(context.Background(), l, cassandra)
+ if c.DeletionTimestamp != nil {
+ _, err = r.handleDeleteCluster(context.Background(), l, c)
return err
}
- return r.handleExternalDelete(context.Background(), cassandra)
+ return r.handleExternalDelete(context.Background(), c)
}
l.Error(err, "Cannot get cluster from the Instaclustr API",
- "clusterID", cassandra.Status.ID)
+ "clusterID", c.Status.ID)
return err
}
- iCassandra, err := cassandra.FromInstAPI(iData)
+ iCassandra, err := c.FromInstAPI(iData)
if err != nil {
l.Error(err, "Cannot convert cluster from the Instaclustr API",
- "cluster name", cassandra.Spec.Name,
- "cluster ID", cassandra.Status.ID,
+ "cluster name", c.Spec.Name,
+ "cluster ID", c.Status.ID,
)
return err
}
- if !areStatusesEqual(&iCassandra.Status.ClusterStatus, &cassandra.Status.ClusterStatus) {
+ if !areStatusesEqual(&iCassandra.Status.ClusterStatus, &c.Status.ClusterStatus) {
l.Info("Updating cluster status",
"status from Instaclustr", iCassandra.Status.ClusterStatus,
- "status from k8s", cassandra.Status.ClusterStatus)
+ "status from k8s", c.Status.ClusterStatus)
- areDCsEqual := areDataCentresEqual(iCassandra.Status.ClusterStatus.DataCentres, cassandra.Status.ClusterStatus.DataCentres)
+ areDCsEqual := areDataCentresEqual(iCassandra.Status.ClusterStatus.DataCentres, c.Status.ClusterStatus.DataCentres)
- patch := cassandra.NewPatch()
- cassandra.Status.ClusterStatus = iCassandra.Status.ClusterStatus
- err = r.Status().Patch(context.Background(), cassandra, patch)
+ patch := c.NewPatch()
+ c.Status.ClusterStatus = iCassandra.Status.ClusterStatus
+ err = r.Status().Patch(context.Background(), c, patch)
if err != nil {
return err
}
@@ -714,9 +822,9 @@ func (r *CassandraReconciler) newWatchStatusJob(cassandra *v1beta1.Cassandra) sc
}
err = exposeservice.Create(r.Client,
- cassandra.Name,
- cassandra.Namespace,
- cassandra.Spec.PrivateNetworkCluster,
+ c.Name,
+ c.Namespace,
+ c.Spec.PrivateNetworkCluster,
nodes,
models.CassandraConnectionPort)
if err != nil {
@@ -726,49 +834,49 @@ func (r *CassandraReconciler) newWatchStatusJob(cassandra *v1beta1.Cassandra) sc
}
if iCassandra.Status.CurrentClusterOperationStatus == models.NoOperation &&
- cassandra.Annotations[models.ResourceStateAnnotation] != models.UpdatingEvent &&
- cassandra.Annotations[models.UpdateQueuedAnnotation] != models.True &&
- !cassandra.Spec.IsEqual(iCassandra.Spec) {
- l.Info(msgExternalChanges, "instaclustr data", iCassandra.Spec, "k8s resource spec", cassandra.Spec)
+ c.Annotations[models.ResourceStateAnnotation] != models.UpdatingEvent &&
+ c.Annotations[models.UpdateQueuedAnnotation] != models.True &&
+ !c.Spec.IsEqual(iCassandra.Spec) {
+ l.Info(msgExternalChanges, "instaclustr data", iCassandra.Spec, "k8s resource spec", c.Spec)
- patch := cassandra.NewPatch()
- cassandra.Annotations[models.ExternalChangesAnnotation] = models.True
+ patch := c.NewPatch()
+ c.Annotations[models.ExternalChangesAnnotation] = models.True
- err = r.Patch(context.Background(), cassandra, patch)
+ err = r.Patch(context.Background(), c, patch)
if err != nil {
l.Error(err, "Cannot patch cluster cluster",
- "cluster name", cassandra.Spec.Name, "cluster state", cassandra.Status.State)
+ "cluster name", c.Spec.Name, "cluster state", c.Status.State)
return err
}
- msgDiffSpecs, err := createSpecDifferenceMessage(cassandra.Spec, iCassandra.Spec)
+ msgDiffSpecs, err := createSpecDifferenceMessage(c.Spec, iCassandra.Spec)
if err != nil {
l.Error(err, "Cannot create specification difference message",
- "instaclustr data", iCassandra.Spec, "k8s resource spec", cassandra.Spec)
+ "instaclustr data", iCassandra.Spec, "k8s resource spec", c.Spec)
return err
}
- r.EventRecorder.Eventf(cassandra, models.Warning, models.ExternalChanges, msgDiffSpecs)
+ r.EventRecorder.Eventf(c, models.Warning, models.ExternalChanges, msgDiffSpecs)
}
//TODO: change all context.Background() and context.TODO() to ctx from Reconcile
- err = r.reconcileMaintenanceEvents(context.Background(), cassandra)
+ err = r.reconcileMaintenanceEvents(context.Background(), c)
if err != nil {
l.Error(err, "Cannot reconcile cluster maintenance events",
- "cluster name", cassandra.Spec.Name,
- "cluster ID", cassandra.Status.ID,
+ "cluster name", c.Spec.Name,
+ "cluster ID", c.Status.ID,
)
return err
}
- if cassandra.Status.State == models.RunningStatus && cassandra.Status.CurrentClusterOperationStatus == models.OperationInProgress {
- patch := cassandra.NewPatch()
- for _, dc := range cassandra.Status.DataCentres {
+ if c.Status.State == models.RunningStatus && c.Status.CurrentClusterOperationStatus == models.OperationInProgress {
+ patch := c.NewPatch()
+ for _, dc := range c.Status.DataCentres {
resizeOperations, err := r.API.GetResizeOperationsByClusterDataCentreID(dc.ID)
if err != nil {
l.Error(err, "Cannot get data centre resize operations",
- "cluster name", cassandra.Spec.Name,
- "cluster ID", cassandra.Status.ID,
+ "cluster name", c.Spec.Name,
+ "cluster ID", c.Status.ID,
"data centre ID", dc.ID,
)
@@ -776,11 +884,11 @@ func (r *CassandraReconciler) newWatchStatusJob(cassandra *v1beta1.Cassandra) sc
}
dc.ResizeOperations = resizeOperations
- err = r.Status().Patch(context.Background(), cassandra, patch)
+ err = r.Status().Patch(context.Background(), c, patch)
if err != nil {
l.Error(err, "Cannot patch data centre resize operations",
- "cluster name", cassandra.Spec.Name,
- "cluster ID", cassandra.Status.ID,
+ "cluster name", c.Spec.Name,
+ "cluster ID", c.Status.ID,
"data centre ID", dc.ID,
)
@@ -793,12 +901,12 @@ func (r *CassandraReconciler) newWatchStatusJob(cassandra *v1beta1.Cassandra) sc
}
}
-func (r *CassandraReconciler) newWatchBackupsJob(cluster *v1beta1.Cassandra) scheduler.Job {
+func (r *CassandraReconciler) newWatchBackupsJob(c *v1beta1.Cassandra) scheduler.Job {
l := log.Log.WithValues("component", "cassandraBackupsClusterJob")
return func() error {
ctx := context.Background()
- err := r.Get(ctx, types.NamespacedName{Namespace: cluster.Namespace, Name: cluster.Name}, cluster)
+ err := r.Get(ctx, types.NamespacedName{Namespace: c.Namespace, Name: c.Name}, c)
if err != nil {
if k8serrors.IsNotFound(err) {
return nil
@@ -807,11 +915,11 @@ func (r *CassandraReconciler) newWatchBackupsJob(cluster *v1beta1.Cassandra) sch
return err
}
- iBackups, err := r.API.GetClusterBackups(cluster.Status.ID, models.ClusterKindsMap[cluster.Kind])
+ iBackups, err := r.API.GetClusterBackups(c.Status.ID, models.ClusterKindsMap[c.Kind])
if err != nil {
l.Error(err, "Cannot get cluster backups",
- "cluster name", cluster.Spec.Name,
- "cluster ID", cluster.Status.ID,
+ "cluster name", c.Spec.Name,
+ "cluster ID", c.Status.ID,
)
return err
@@ -819,11 +927,11 @@ func (r *CassandraReconciler) newWatchBackupsJob(cluster *v1beta1.Cassandra) sch
iBackupEvents := iBackups.GetBackupEvents(models.CassandraKind)
- k8sBackupList, err := r.listClusterBackups(ctx, cluster.Status.ID, cluster.Namespace)
+ k8sBackupList, err := r.listClusterBackups(ctx, c.Status.ID, c.Namespace)
if err != nil {
l.Error(err, "Cannot list cluster backups",
- "cluster name", cluster.Spec.Name,
- "cluster ID", cluster.Status.ID,
+ "cluster name", c.Spec.Name,
+ "cluster ID", c.Status.ID,
)
return err
@@ -887,7 +995,7 @@ func (r *CassandraReconciler) newWatchBackupsJob(cluster *v1beta1.Cassandra) sch
continue
}
- backupSpec := cluster.NewBackupSpec(start)
+ backupSpec := c.NewBackupSpec(start)
err = r.Create(ctx, backupSpec)
if err != nil {
return err
diff --git a/controllers/clusters/helpers.go b/controllers/clusters/helpers.go
index 6ee2bb09f..e35ce6d4a 100644
--- a/controllers/clusters/helpers.go
+++ b/controllers/clusters/helpers.go
@@ -117,26 +117,31 @@ func isDataCentreNodesEqual(a, b []*v1beta1.Node) bool {
if a == nil && b == nil {
return true
}
-
if len(a) != len(b) {
return false
}
for i := range a {
- if a[i].ID != b[i].ID {
- continue
- }
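+ // eq records whether a node with the same ID as a[i] was found in b.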
+ var eq bool
+ for j := range b {
+ if a[i].ID != b[j].ID {
+ continue
+ }
- if a[i].Size != b[i].Size ||
- a[i].PublicAddress != b[i].PublicAddress ||
- a[i].PrivateAddress != b[i].PrivateAddress ||
- a[i].Status != b[i].Status ||
- !slices.Equal(a[i].Roles, b[i].Roles) ||
- a[i].Rack != b[i].Rack {
+ if a[i].Size != b[j].Size ||
+ a[i].PublicAddress != b[j].PublicAddress ||
+ a[i].PrivateAddress != b[j].PrivateAddress ||
+ a[i].Status != b[j].Status ||
+ !slices.Equal(a[i].Roles, b[j].Roles) ||
+ a[i].Rack != b[j].Rack {
+ return false
+ }
+ eq = true
+ }
+ if !eq {
return false
}
}
-
return true
}
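The reworked isDataCentreNodesEqual above matches nodes by ID rather than by position, so slices holding the same nodes in a different order now compare as equal, while a missing ID or a field mismatch on a matching ID still returns false. A minimal test sketch of that behaviour (the test file name and field values are illustrative, not part of this change):

package clusters

import (
	"testing"

	"github.com/instaclustr/operator/apis/clusters/v1beta1"
)

func TestIsDataCentreNodesEqualOrderIndependent(t *testing.T) {
	a := []*v1beta1.Node{
		{ID: "n1", Size: "size-small", Rack: "rack-1"},
		{ID: "n2", Size: "size-small", Rack: "rack-2"},
	}
	b := []*v1beta1.Node{
		{ID: "n2", Size: "size-small", Rack: "rack-2"},
		{ID: "n1", Size: "size-small", Rack: "rack-1"},
	}

	// Same nodes, different order: the new implementation treats them as equal.
	if !isDataCentreNodesEqual(a, b) {
		t.Error("expected node slices with the same IDs and fields to be equal regardless of order")
	}

	// A field mismatch on a matching ID is still detected.
	b[1].Size = "size-large"
	if isDataCentreNodesEqual(a, b) {
		t.Error("expected a size mismatch on the same node ID to make the slices unequal")
	}
}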
diff --git a/controllers/clusters/kafka_controller.go b/controllers/clusters/kafka_controller.go
index d076e73c0..9acd59b04 100644
--- a/controllers/clusters/kafka_controller.go
+++ b/controllers/clusters/kafka_controller.go
@@ -57,6 +57,14 @@ type KafkaReconciler struct {
//+kubebuilder:rbac:groups=clusters.instaclustr.com,resources=kafkas/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=clusters.instaclustr.com,resources=kafkas/finalizers,verbs=update
//+kubebuilder:rbac:groups="",resources=events,verbs=create;patch
+//+kubebuilder:rbac:groups=cdi.kubevirt.io,resources=datavolumes,verbs=get;list;watch;create;update;patch;delete;deletecollection
+//+kubebuilder:rbac:groups=kubevirt.io,resources=virtualmachines,verbs=get;list;watch;create;update;patch;delete;deletecollection
+//+kubebuilder:rbac:groups=kubevirt.io,resources=virtualmachineinstances,verbs=get;list;watch;create;update;patch;delete;deletecollection
+//+kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete;deletecollection
+//+kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete;deletecollection
+//+kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch;create;update;patch;delete
+//+kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete
+//+kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch;delete;deletecollection
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
@@ -66,8 +74,8 @@ type KafkaReconciler struct {
func (r *KafkaReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
l := log.FromContext(ctx)
- var kafka v1beta1.Kafka
- err := r.Client.Get(ctx, req.NamespacedName, &kafka)
+ var k v1beta1.Kafka
+ err := r.Client.Get(ctx, req.NamespacedName, &k)
if err != nil {
if k8serrors.IsNotFound(err) {
l.Info("Kafka custom resource is not found", "namespaced name ", req.NamespacedName)
@@ -78,39 +86,39 @@ func (r *KafkaReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl
return reconcile.Result{}, err
}
- switch kafka.Annotations[models.ResourceStateAnnotation] {
+ switch k.Annotations[models.ResourceStateAnnotation] {
case models.CreatingEvent:
- return r.handleCreateCluster(ctx, &kafka, l)
+ return r.handleCreateCluster(ctx, &k, l)
case models.UpdatingEvent:
- return r.handleUpdateCluster(ctx, &kafka, l)
+ return r.handleUpdateCluster(ctx, &k, l)
case models.DeletingEvent:
- return r.handleDeleteCluster(ctx, &kafka, l)
+ return r.handleDeleteCluster(ctx, &k, l)
case models.GenericEvent:
- l.Info("Event isn't handled", "cluster name", kafka.Spec.Name, "request", req,
- "event", kafka.Annotations[models.ResourceStateAnnotation])
+ l.Info("Event isn't handled", "cluster name", k.Spec.Name, "request", req,
+ "event", k.Annotations[models.ResourceStateAnnotation])
return models.ExitReconcile, nil
}
return models.ExitReconcile, nil
}
-func (r *KafkaReconciler) handleCreateCluster(ctx context.Context, kafka *v1beta1.Kafka, l logr.Logger) (reconcile.Result, error) {
+func (r *KafkaReconciler) handleCreateCluster(ctx context.Context, k *v1beta1.Kafka, l logr.Logger) (reconcile.Result, error) {
l = l.WithName("Kafka creation Event")
var err error
- if kafka.Status.ID == "" {
+ if k.Status.ID == "" {
l.Info("Creating cluster",
- "cluster name", kafka.Spec.Name,
- "data centres", kafka.Spec.DataCentres)
+ "cluster name", k.Spec.Name,
+ "data centres", k.Spec.DataCentres)
- patch := kafka.NewPatch()
- kafka.Status.ID, err = r.API.CreateCluster(instaclustr.KafkaEndpoint, kafka.Spec.ToInstAPI())
+ patch := k.NewPatch()
+ k.Status.ID, err = r.API.CreateCluster(instaclustr.KafkaEndpoint, k.Spec.ToInstAPI())
if err != nil {
l.Error(err, "Cannot create cluster",
- "spec", kafka.Spec,
+ "spec", k.Spec,
)
r.EventRecorder.Eventf(
- kafka, models.Warning, models.CreationFailed,
+ k, models.Warning, models.CreationFailed,
"Cluster creation on the Instaclustr is failed. Reason: %v",
err,
)
@@ -118,33 +126,33 @@ func (r *KafkaReconciler) handleCreateCluster(ctx context.Context, kafka *v1beta
}
r.EventRecorder.Eventf(
- kafka, models.Normal, models.Created,
+ k, models.Normal, models.Created,
"Cluster creation request is sent. Cluster ID: %s",
- kafka.Status.ID,
+ k.Status.ID,
)
- err = r.Status().Patch(ctx, kafka, patch)
+ err = r.Status().Patch(ctx, k, patch)
if err != nil {
l.Error(err, "Cannot patch cluster status",
- "spec", kafka.Spec,
+ "spec", k.Spec,
)
r.EventRecorder.Eventf(
- kafka, models.Warning, models.PatchFailed,
+ k, models.Warning, models.PatchFailed,
"Cluster resource status patch is failed. Reason: %v",
err,
)
return reconcile.Result{}, err
}
- kafka.Annotations[models.ResourceStateAnnotation] = models.CreatedEvent
- controllerutil.AddFinalizer(kafka, models.DeletionFinalizer)
- err = r.Patch(ctx, kafka, patch)
+ k.Annotations[models.ResourceStateAnnotation] = models.CreatedEvent
+ controllerutil.AddFinalizer(k, models.DeletionFinalizer)
+ err = r.Patch(ctx, k, patch)
if err != nil {
l.Error(err, "Cannot patch cluster resource",
- "name", kafka.Spec.Name,
+ "name", k.Spec.Name,
)
r.EventRecorder.Eventf(
- kafka, models.Warning, models.PatchFailed,
+ k, models.Warning, models.PatchFailed,
"Cluster resource patch is failed. Reason: %v",
err,
)
@@ -152,17 +160,17 @@ func (r *KafkaReconciler) handleCreateCluster(ctx context.Context, kafka *v1beta
}
l.Info("Cluster has been created",
- "cluster ID", kafka.Status.ID,
+ "cluster ID", k.Status.ID,
)
}
- if kafka.Status.State != models.DeletedStatus {
- err = r.startClusterStatusJob(kafka)
+ if k.Status.State != models.DeletedStatus {
+ err = r.startClusterStatusJob(k)
if err != nil {
l.Error(err, "Cannot start cluster status job",
- "cluster ID", kafka.Status.ID)
+ "cluster ID", k.Status.ID)
r.EventRecorder.Eventf(
- kafka, models.Warning, models.CreationFailed,
+ k, models.Warning, models.CreationFailed,
"Cluster status check job creation is failed. Reason: %v",
err,
)
@@ -170,24 +178,102 @@ func (r *KafkaReconciler) handleCreateCluster(ctx context.Context, kafka *v1beta
}
r.EventRecorder.Eventf(
- kafka, models.Normal, models.Created,
+ k, models.Normal, models.Created,
"Cluster status check job is started",
)
- if kafka.Spec.UserRefs != nil {
- err = r.startUsersCreationJob(kafka)
+ if k.Spec.UserRefs != nil {
+ err = r.startUsersCreationJob(k)
if err != nil {
l.Error(err, "Failed to start user creation job")
- r.EventRecorder.Eventf(kafka, models.Warning, models.CreationFailed,
+ r.EventRecorder.Eventf(k, models.Warning, models.CreationFailed,
"User creation job is failed. Reason: %v", err,
)
return reconcile.Result{}, err
}
- r.EventRecorder.Event(kafka, models.Normal, models.Created,
+ r.EventRecorder.Event(k, models.Normal, models.Created,
"Cluster user creation job is started",
)
}
+
+ if k.Spec.OnPremisesSpec != nil {
+ iData, err := r.API.GetKafka(k.Status.ID)
+ if err != nil {
+ l.Error(err, "Cannot get cluster from the Instaclustr API",
+ "cluster name", k.Spec.Name,
+ "data centres", k.Spec.DataCentres,
+ "cluster ID", k.Status.ID,
+ )
+ r.EventRecorder.Eventf(
+ k, models.Warning, models.FetchFailed,
+ "Cluster fetch from the Instaclustr API is failed. Reason: %v",
+ err,
+ )
+ return reconcile.Result{}, err
+ }
+ iKafka, err := k.FromInstAPI(iData)
+ if err != nil {
+ l.Error(
+ err, "Cannot convert cluster from the Instaclustr API",
+ "cluster name", k.Spec.Name,
+ "cluster ID", k.Status.ID,
+ )
+ r.EventRecorder.Eventf(
+ k, models.Warning, models.ConversionFailed,
+ "Cluster convertion from the Instaclustr API to k8s resource is failed. Reason: %v",
+ err,
+ )
+ return reconcile.Result{}, err
+ }
+
+ bootstrap := newOnPremisesBootstrap(
+ r.Client,
+ k,
+ r.EventRecorder,
+ iKafka.Status.ClusterStatus,
+ k.Spec.OnPremisesSpec,
+ newExposePorts(k.GetExposePorts()),
+ k.GetHeadlessPorts(),
+ k.Spec.PrivateNetworkCluster,
+ )
+
+ err = handleCreateOnPremisesClusterResources(ctx, bootstrap)
+ if err != nil {
+ l.Error(
+ err, "Cannot create resources for on-premises cluster",
+ "cluster spec", k.Spec.OnPremisesSpec,
+ )
+ r.EventRecorder.Eventf(
+ k, models.Warning, models.CreationFailed,
+ "Resources creation for on-premises cluster is failed. Reason: %v",
+ err,
+ )
+ return reconcile.Result{}, err
+ }
+
+ err = r.startClusterOnPremisesIPsJob(k, bootstrap)
+ if err != nil {
+ l.Error(err, "Cannot start on-premises cluster IPs check job",
+ "cluster ID", k.Status.ID,
+ )
+
+ r.EventRecorder.Eventf(
+ k, models.Warning, models.CreationFailed,
+ "On-premises cluster IPs check job is failed. Reason: %v",
+ err,
+ )
+ return reconcile.Result{}, err
+ }
+
+ l.Info(
+ "On-premises resources have been created",
+ "cluster name", k.Spec.Name,
+ "on-premises Spec", k.Spec.OnPremisesSpec,
+ "cluster ID", k.Status.ID,
+ )
+ return models.ExitReconcile, nil
+ }
}
return models.ExitReconcile, nil
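The fetch-convert-bootstrap-provision sequence added here for Kafka is repeated almost verbatim in the Cassandra and KafkaConnect reconcilers. As a hedged illustration only, the post-bootstrap steps could be collected into one shared helper; the helper name and the scheduler parameter type below are assumptions, not code introduced by this change:

// provisionOnPremises is a hypothetical helper sketch: it wraps the two calls each
// reconciler performs after building an onPremisesBootstrap for an on-premises cluster.
func provisionOnPremises(ctx context.Context, s scheduler.Interface, b *onPremisesBootstrap, jobID, kind string) error {
	// Create the on-premises node resources (see handleCreateOnPremisesClusterResources in on_premises.go).
	if err := handleCreateOnPremisesClusterResources(ctx, b); err != nil {
		return fmt.Errorf("creating on-premises resources: %w", err)
	}
	// Schedule the periodic IPs check; the delete handlers remove this job by the same ID.
	return s.ScheduleJob(jobID, scheduler.ClusterStatusInterval, newWatchOnPremisesIPsJob(kind, b))
}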
@@ -212,7 +298,7 @@ func (r *KafkaReconciler) handleUpdateCluster(
return reconcile.Result{}, err
}
- if iKafka.Status.ClusterStatus.State != StatusRUNNING {
+ if iKafka.Status.ClusterStatus.State != models.RunningStatus {
l.Error(instaclustr.ClusterNotRunning, "Unable to update cluster, cluster still not running",
"cluster name", k.Spec.Name,
"cluster state", iKafka.Status.ClusterStatus.State)
@@ -359,35 +445,35 @@ func (r *KafkaReconciler) handleExternalChanges(k, ik *v1beta1.Kafka, l logr.Log
return models.ExitReconcile, nil
}
-func (r *KafkaReconciler) handleDeleteCluster(ctx context.Context, kafka *v1beta1.Kafka, l logr.Logger) (reconcile.Result, error) {
+func (r *KafkaReconciler) handleDeleteCluster(ctx context.Context, k *v1beta1.Kafka, l logr.Logger) (reconcile.Result, error) {
l = l.WithName("Kafka deletion Event")
- _, err := r.API.GetKafka(kafka.Status.ID)
+ _, err := r.API.GetKafka(k.Status.ID)
if err != nil && !errors.Is(err, instaclustr.NotFound) {
l.Error(err, "Cannot get cluster from the Instaclustr API",
- "cluster name", kafka.Spec.Name,
- "cluster state", kafka.Status.ClusterStatus.State)
+ "cluster name", k.Spec.Name,
+ "cluster state", k.Status.ClusterStatus.State)
r.EventRecorder.Eventf(
- kafka, models.Warning, models.FetchFailed,
+ k, models.Warning, models.FetchFailed,
"Cluster resource fetch from the Instaclustr API is failed. Reason: %v",
err,
)
return reconcile.Result{}, err
}
- patch := kafka.NewPatch()
+ patch := k.NewPatch()
if !errors.Is(err, instaclustr.NotFound) {
l.Info("Sending cluster deletion to the Instaclustr API",
- "cluster name", kafka.Spec.Name,
- "cluster ID", kafka.Status.ID)
+ "cluster name", k.Spec.Name,
+ "cluster ID", k.Status.ID)
- err = r.API.DeleteCluster(kafka.Status.ID, instaclustr.KafkaEndpoint)
+ err = r.API.DeleteCluster(k.Status.ID, instaclustr.KafkaEndpoint)
if err != nil {
l.Error(err, "Cannot delete cluster",
- "cluster name", kafka.Spec.Name,
- "cluster state", kafka.Status.ClusterStatus.State)
+ "cluster name", k.Spec.Name,
+ "cluster state", k.Status.ClusterStatus.State)
r.EventRecorder.Eventf(
- kafka, models.Warning, models.DeletionFailed,
+ k, models.Warning, models.DeletionFailed,
"Cluster deletion is failed on the Instaclustr. Reason: %v",
err,
)
@@ -395,82 +481,110 @@ func (r *KafkaReconciler) handleDeleteCluster(ctx context.Context, kafka *v1beta
}
r.EventRecorder.Eventf(
- kafka, models.Normal, models.DeletionStarted,
+ k, models.Normal, models.DeletionStarted,
"Cluster deletion request is sent to the Instaclustr API.",
)
- if kafka.Spec.TwoFactorDelete != nil {
- kafka.Annotations[models.ResourceStateAnnotation] = models.UpdatedEvent
- kafka.Annotations[models.ClusterDeletionAnnotation] = models.Triggered
- err = r.Patch(ctx, kafka, patch)
+ if k.Spec.TwoFactorDelete != nil {
+ k.Annotations[models.ResourceStateAnnotation] = models.UpdatedEvent
+ k.Annotations[models.ClusterDeletionAnnotation] = models.Triggered
+ err = r.Patch(ctx, k, patch)
if err != nil {
l.Error(err, "Cannot patch cluster resource",
- "cluster name", kafka.Spec.Name,
- "cluster state", kafka.Status.State)
+ "cluster name", k.Spec.Name,
+ "cluster state", k.Status.State)
r.EventRecorder.Eventf(
- kafka, models.Warning, models.PatchFailed,
+ k, models.Warning, models.PatchFailed,
"Cluster resource patch is failed. Reason: %v",
err,
)
return reconcile.Result{}, err
}
- l.Info(msgDeleteClusterWithTwoFactorDelete, "cluster ID", kafka.Status.ID)
+ l.Info(msgDeleteClusterWithTwoFactorDelete, "cluster ID", k.Status.ID)
- r.EventRecorder.Event(kafka, models.Normal, models.DeletionStarted,
+ r.EventRecorder.Event(k, models.Normal, models.DeletionStarted,
"Two-Factor Delete is enabled, please confirm cluster deletion via email or phone.")
return models.ExitReconcile, nil
}
}
- err = detachUsers(ctx, r.Client, r, kafka)
+ err = detachUsers(ctx, r.Client, r, k)
if err != nil {
l.Error(err, "Failed to detach users from the cluster")
- r.EventRecorder.Eventf(kafka, models.Warning, models.DeletionFailed,
+ r.EventRecorder.Eventf(k, models.Warning, models.DeletionFailed,
"Detaching users from the cluster is failed. Reason: %w", err,
)
return reconcile.Result{}, err
}
- r.Scheduler.RemoveJob(kafka.GetJobID(scheduler.StatusChecker))
- r.Scheduler.RemoveJob(kafka.GetJobID(scheduler.UserCreator))
- controllerutil.RemoveFinalizer(kafka, models.DeletionFinalizer)
- kafka.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent
- err = r.Patch(ctx, kafka, patch)
+ if k.Spec.OnPremisesSpec != nil {
+ err = deleteOnPremResources(ctx, r.Client, k.Status.ID, k.Namespace)
+ if err != nil {
+ l.Error(err, "Cannot delete cluster on-premises resources",
+ "cluster ID", k.Status.ID)
+ r.EventRecorder.Eventf(k, models.Warning, models.DeletionFailed,
+ "Cluster on-premises resources deletion is failed. Reason: %v", err)
+ return reconcile.Result{}, err
+ }
+
+ l.Info("Cluster on-premises resources are deleted",
+ "cluster ID", k.Status.ID)
+ r.EventRecorder.Eventf(k, models.Normal, models.Deleted,
+ "Cluster on-premises resources are deleted")
+ r.Scheduler.RemoveJob(k.GetJobID(scheduler.OnPremisesIPsChecker))
+ }
+
+ r.Scheduler.RemoveJob(k.GetJobID(scheduler.StatusChecker))
+ r.Scheduler.RemoveJob(k.GetJobID(scheduler.UserCreator))
+ controllerutil.RemoveFinalizer(k, models.DeletionFinalizer)
+ k.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent
+ err = r.Patch(ctx, k, patch)
if err != nil {
l.Error(err, "Cannot patch cluster resource",
- "cluster name", kafka.Spec.Name)
+ "cluster name", k.Spec.Name)
r.EventRecorder.Eventf(
- kafka, models.Warning, models.PatchFailed,
+ k, models.Warning, models.PatchFailed,
"Cluster resource patch is failed. Reason: %v",
err,
)
return reconcile.Result{}, err
}
- err = exposeservice.Delete(r.Client, kafka.Name, kafka.Namespace)
+ err = exposeservice.Delete(r.Client, k.Name, k.Namespace)
if err != nil {
l.Error(err, "Cannot delete Kafka cluster expose service",
- "cluster ID", kafka.Status.ID,
- "cluster name", kafka.Spec.Name,
+ "cluster ID", k.Status.ID,
+ "cluster name", k.Spec.Name,
)
return reconcile.Result{}, err
}
l.Info("Cluster was deleted",
- "cluster name", kafka.Spec.Name,
- "cluster ID", kafka.Status.ID)
+ "cluster name", k.Spec.Name,
+ "cluster ID", k.Status.ID)
r.EventRecorder.Eventf(
- kafka, models.Normal, models.Deleted,
+ k, models.Normal, models.Deleted,
"Cluster resource is deleted",
)
return models.ExitReconcile, nil
}
+func (r *KafkaReconciler) startClusterOnPremisesIPsJob(k *v1beta1.Kafka, b *onPremisesBootstrap) error {
+ job := newWatchOnPremisesIPsJob(k.Kind, b)
+
+ err := r.Scheduler.ScheduleJob(k.GetJobID(scheduler.OnPremisesIPsChecker), scheduler.ClusterStatusInterval, job)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
func (r *KafkaReconciler) startClusterStatusJob(kafka *v1beta1.Kafka) error {
job := r.newWatchStatusJob(kafka)
@@ -482,61 +596,61 @@ func (r *KafkaReconciler) startClusterStatusJob(kafka *v1beta1.Kafka) error {
return nil
}
-func (r *KafkaReconciler) newWatchStatusJob(kafka *v1beta1.Kafka) scheduler.Job {
+func (r *KafkaReconciler) newWatchStatusJob(k *v1beta1.Kafka) scheduler.Job {
l := log.Log.WithValues("component", "kafkaStatusClusterJob")
return func() error {
- namespacedName := client.ObjectKeyFromObject(kafka)
- err := r.Get(context.Background(), namespacedName, kafka)
+ namespacedName := client.ObjectKeyFromObject(k)
+ err := r.Get(context.Background(), namespacedName, k)
if k8serrors.IsNotFound(err) {
l.Info("Resource is not found in the k8s cluster. Closing Instaclustr status sync.",
"namespaced name", namespacedName)
- r.Scheduler.RemoveJob(kafka.GetJobID(scheduler.StatusChecker))
- r.Scheduler.RemoveJob(kafka.GetJobID(scheduler.UserCreator))
- r.Scheduler.RemoveJob(kafka.GetJobID(scheduler.BackupsChecker))
+ r.Scheduler.RemoveJob(k.GetJobID(scheduler.StatusChecker))
+ r.Scheduler.RemoveJob(k.GetJobID(scheduler.UserCreator))
+ r.Scheduler.RemoveJob(k.GetJobID(scheduler.BackupsChecker))
return nil
}
if err != nil {
l.Error(err, "Cannot get cluster resource",
- "resource name", kafka.Name)
+ "resource name", k.Name)
return err
}
- iData, err := r.API.GetKafka(kafka.Status.ID)
+ iData, err := r.API.GetKafka(k.Status.ID)
if err != nil {
if errors.Is(err, instaclustr.NotFound) {
- if kafka.DeletionTimestamp != nil {
- _, err = r.handleDeleteCluster(context.Background(), kafka, l)
+ if k.DeletionTimestamp != nil {
+ _, err = r.handleDeleteCluster(context.Background(), k, l)
return err
}
- return r.handleExternalDelete(context.Background(), kafka)
+ return r.handleExternalDelete(context.Background(), k)
}
- l.Error(err, "Cannot get cluster from the Instaclustr", "cluster ID", kafka.Status.ID)
+ l.Error(err, "Cannot get cluster from the Instaclustr", "cluster ID", k.Status.ID)
return err
}
- iKafka, err := kafka.FromInstAPI(iData)
+ iKafka, err := k.FromInstAPI(iData)
if err != nil {
l.Error(err, "Cannot convert cluster from the Instaclustr API",
- "cluster ID", kafka.Status.ID,
+ "cluster ID", k.Status.ID,
)
return err
}
- if !areStatusesEqual(&kafka.Status.ClusterStatus, &iKafka.Status.ClusterStatus) {
+ if !areStatusesEqual(&k.Status.ClusterStatus, &iKafka.Status.ClusterStatus) {
l.Info("Kafka status of k8s is different from Instaclustr. Reconcile k8s resource status..",
"instacluster status", iKafka.Status,
- "k8s status", kafka.Status.ClusterStatus)
+ "k8s status", k.Status.ClusterStatus)
- areDCsEqual := areDataCentresEqual(iKafka.Status.ClusterStatus.DataCentres, kafka.Status.ClusterStatus.DataCentres)
+ areDCsEqual := areDataCentresEqual(iKafka.Status.ClusterStatus.DataCentres, k.Status.ClusterStatus.DataCentres)
- patch := kafka.NewPatch()
- kafka.Status.ClusterStatus = iKafka.Status.ClusterStatus
- err = r.Status().Patch(context.Background(), kafka, patch)
+ patch := k.NewPatch()
+ k.Status.ClusterStatus = iKafka.Status.ClusterStatus
+ err = r.Status().Patch(context.Background(), k, patch)
if err != nil {
l.Error(err, "Cannot patch cluster cluster",
- "cluster name", kafka.Spec.Name, "cluster state", kafka.Status.State)
+ "cluster name", k.Spec.Name, "cluster state", k.Status.State)
return err
}
@@ -548,9 +662,9 @@ func (r *KafkaReconciler) newWatchStatusJob(kafka *v1beta1.Kafka) scheduler.Job
}
err = exposeservice.Create(r.Client,
- kafka.Name,
- kafka.Namespace,
- kafka.Spec.PrivateNetworkCluster,
+ k.Name,
+ k.Namespace,
+ k.Spec.PrivateNetworkCluster,
nodes,
models.KafkaConnectionPort)
if err != nil {
@@ -560,49 +674,49 @@ func (r *KafkaReconciler) newWatchStatusJob(kafka *v1beta1.Kafka) scheduler.Job
}
if iKafka.Status.CurrentClusterOperationStatus == models.NoOperation &&
- kafka.Annotations[models.UpdateQueuedAnnotation] != models.True &&
- !kafka.Spec.IsEqual(iKafka.Spec) {
+ k.Annotations[models.UpdateQueuedAnnotation] != models.True &&
+ !k.Spec.IsEqual(iKafka.Spec) {
- patch := kafka.NewPatch()
- kafka.Annotations[models.ExternalChangesAnnotation] = models.True
+ patch := k.NewPatch()
+ k.Annotations[models.ExternalChangesAnnotation] = models.True
- err = r.Patch(context.Background(), kafka, patch)
+ err = r.Patch(context.Background(), k, patch)
if err != nil {
l.Error(err, "Cannot patch cluster cluster",
- "cluster name", kafka.Spec.Name, "cluster state", kafka.Status.State)
+ "cluster name", k.Spec.Name, "cluster state", k.Status.State)
return err
}
l.Info("The k8s specification is different from Instaclustr Console. Update operations are blocked.",
- "instaclustr data", iKafka.Spec, "k8s resource spec", kafka.Spec)
+ "instaclustr data", iKafka.Spec, "k8s resource spec", k.Spec)
- msgDiffSpecs, err := createSpecDifferenceMessage(kafka.Spec, iKafka.Spec)
+ msgDiffSpecs, err := createSpecDifferenceMessage(k.Spec, iKafka.Spec)
if err != nil {
l.Error(err, "Cannot create specification difference message",
- "instaclustr data", iKafka.Spec, "k8s resource spec", kafka.Spec)
+ "instaclustr data", iKafka.Spec, "k8s resource spec", k.Spec)
return err
}
- r.EventRecorder.Eventf(kafka, models.Warning, models.ExternalChanges, msgDiffSpecs)
+ r.EventRecorder.Eventf(k, models.Warning, models.ExternalChanges, msgDiffSpecs)
}
//TODO: change all context.Background() and context.TODO() to ctx from Reconcile
- err = r.reconcileMaintenanceEvents(context.Background(), kafka)
+ err = r.reconcileMaintenanceEvents(context.Background(), k)
if err != nil {
l.Error(err, "Cannot reconcile cluster maintenance events",
- "cluster name", kafka.Spec.Name,
- "cluster ID", kafka.Status.ID,
+ "cluster name", k.Spec.Name,
+ "cluster ID", k.Status.ID,
)
return err
}
- if kafka.Status.State == models.RunningStatus && kafka.Status.CurrentClusterOperationStatus == models.OperationInProgress {
- patch := kafka.NewPatch()
- for _, dc := range kafka.Status.DataCentres {
+ if k.Status.State == models.RunningStatus && k.Status.CurrentClusterOperationStatus == models.OperationInProgress {
+ patch := k.NewPatch()
+ for _, dc := range k.Status.DataCentres {
resizeOperations, err := r.API.GetResizeOperationsByClusterDataCentreID(dc.ID)
if err != nil {
l.Error(err, "Cannot get data centre resize operations",
- "cluster name", kafka.Spec.Name,
- "cluster ID", kafka.Status.ID,
+ "cluster name", k.Spec.Name,
+ "cluster ID", k.Status.ID,
"data centre ID", dc.ID,
)
@@ -610,11 +724,11 @@ func (r *KafkaReconciler) newWatchStatusJob(kafka *v1beta1.Kafka) scheduler.Job
}
dc.ResizeOperations = resizeOperations
- err = r.Status().Patch(context.Background(), kafka, patch)
+ err = r.Status().Patch(context.Background(), k, patch)
if err != nil {
l.Error(err, "Cannot patch data centre resize operations",
- "cluster name", kafka.Spec.Name,
- "cluster ID", kafka.Status.ID,
+ "cluster name", k.Spec.Name,
+ "cluster ID", k.Status.ID,
"data centre ID", dc.ID,
)
@@ -627,25 +741,25 @@ func (r *KafkaReconciler) newWatchStatusJob(kafka *v1beta1.Kafka) scheduler.Job
}
}
-func (r *KafkaReconciler) startUsersCreationJob(kafka *v1beta1.Kafka) error {
- job := r.newUsersCreationJob(kafka)
+func (r *KafkaReconciler) startUsersCreationJob(k *v1beta1.Kafka) error {
+ job := r.newUsersCreationJob(k)
- err := r.Scheduler.ScheduleJob(kafka.GetJobID(scheduler.UserCreator), scheduler.UserCreationInterval, job)
+ err := r.Scheduler.ScheduleJob(k.GetJobID(scheduler.UserCreator), scheduler.UserCreationInterval, job)
if err != nil {
return err
}
return nil
}
-func (r *KafkaReconciler) newUsersCreationJob(kafka *v1beta1.Kafka) scheduler.Job {
+func (r *KafkaReconciler) newUsersCreationJob(k *v1beta1.Kafka) scheduler.Job {
l := log.Log.WithValues("component", "kafkaUsersCreationJob")
return func() error {
ctx := context.Background()
err := r.Get(ctx, types.NamespacedName{
- Namespace: kafka.Namespace,
- Name: kafka.Name,
- }, kafka)
+ Namespace: k.Namespace,
+ Name: k.Name,
+ }, k)
if err != nil {
if k8serrors.IsNotFound(err) {
@@ -655,50 +769,50 @@ func (r *KafkaReconciler) newUsersCreationJob(kafka *v1beta1.Kafka) scheduler.Jo
return err
}
- if kafka.Status.State != models.RunningStatus {
+ if k.Status.State != models.RunningStatus {
l.Info("User creation job is scheduled")
- r.EventRecorder.Eventf(kafka, models.Normal, models.CreationFailed,
+ r.EventRecorder.Eventf(k, models.Normal, models.CreationFailed,
"User creation job is scheduled, cluster is not in the running state",
)
return nil
}
- err = handleUsersChanges(ctx, r.Client, r, kafka)
+ err = handleUsersChanges(ctx, r.Client, r, k)
if err != nil {
l.Error(err, "Failed to create users for the cluster")
- r.EventRecorder.Eventf(kafka, models.Warning, models.CreationFailed,
+ r.EventRecorder.Eventf(k, models.Warning, models.CreationFailed,
"Failed to create users for the cluster. Reason: %v", err)
return err
}
l.Info("User creation job successfully finished")
- r.EventRecorder.Eventf(kafka, models.Normal, models.Created,
+ r.EventRecorder.Eventf(k, models.Normal, models.Created,
"User creation job successfully finished",
)
- r.Scheduler.RemoveJob(kafka.GetJobID(scheduler.UserCreator))
+ r.Scheduler.RemoveJob(k.GetJobID(scheduler.UserCreator))
return nil
}
}
-func (r *KafkaReconciler) handleExternalDelete(ctx context.Context, kafka *v1beta1.Kafka) error {
+func (r *KafkaReconciler) handleExternalDelete(ctx context.Context, k *v1beta1.Kafka) error {
l := log.FromContext(ctx)
- patch := kafka.NewPatch()
- kafka.Status.State = models.DeletedStatus
- err := r.Status().Patch(ctx, kafka, patch)
+ patch := k.NewPatch()
+ k.Status.State = models.DeletedStatus
+ err := r.Status().Patch(ctx, k, patch)
if err != nil {
return err
}
l.Info(instaclustr.MsgInstaclustrResourceNotFound)
- r.EventRecorder.Eventf(kafka, models.Warning, models.ExternalDeleted, instaclustr.MsgInstaclustrResourceNotFound)
+ r.EventRecorder.Eventf(k, models.Warning, models.ExternalDeleted, instaclustr.MsgInstaclustrResourceNotFound)
- r.Scheduler.RemoveJob(kafka.GetJobID(scheduler.BackupsChecker))
- r.Scheduler.RemoveJob(kafka.GetJobID(scheduler.UserCreator))
- r.Scheduler.RemoveJob(kafka.GetJobID(scheduler.StatusChecker))
+ r.Scheduler.RemoveJob(k.GetJobID(scheduler.BackupsChecker))
+ r.Scheduler.RemoveJob(k.GetJobID(scheduler.UserCreator))
+ r.Scheduler.RemoveJob(k.GetJobID(scheduler.StatusChecker))
return nil
}
diff --git a/controllers/clusters/kafkaconnect_controller.go b/controllers/clusters/kafkaconnect_controller.go
index 07879d330..e09149ac0 100644
--- a/controllers/clusters/kafkaconnect_controller.go
+++ b/controllers/clusters/kafkaconnect_controller.go
@@ -55,6 +55,14 @@ type KafkaConnectReconciler struct {
//+kubebuilder:rbac:groups=clusters.instaclustr.com,resources=kafkaconnects/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=clusters.instaclustr.com,resources=kafkaconnects/finalizers,verbs=update
//+kubebuilder:rbac:groups="",resources=events,verbs=create;patch
+//+kubebuilder:rbac:groups=cdi.kubevirt.io,resources=datavolumes,verbs=get;list;watch;create;update;patch;delete;deletecollection
+//+kubebuilder:rbac:groups=kubevirt.io,resources=virtualmachines,verbs=get;list;watch;create;update;patch;delete;deletecollection
+//+kubebuilder:rbac:groups=kubevirt.io,resources=virtualmachineinstances,verbs=get;list;watch;create;update;patch;delete;deletecollection
+//+kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete;deletecollection
+//+kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete;deletecollection
+//+kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch;create;update;patch;delete
+//+kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete
+//+kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch;delete;deletecollection
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
@@ -64,8 +72,8 @@ type KafkaConnectReconciler struct {
func (r *KafkaConnectReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
l := log.FromContext(ctx)
- kafkaConnect := &v1beta1.KafkaConnect{}
- err := r.Client.Get(ctx, req.NamespacedName, kafkaConnect)
+ kc := &v1beta1.KafkaConnect{}
+ err := r.Client.Get(ctx, req.NamespacedName, kc)
if err != nil {
if k8serrors.IsNotFound(err) {
l.Error(err, "KafkaConnect resource is not found", "request", req)
@@ -76,16 +84,16 @@ func (r *KafkaConnectReconciler) Reconcile(ctx context.Context, req ctrl.Request
return reconcile.Result{}, err
}
- switch kafkaConnect.Annotations[models.ResourceStateAnnotation] {
+ switch kc.Annotations[models.ResourceStateAnnotation] {
case models.CreatingEvent:
- return r.handleCreateCluster(ctx, kafkaConnect, l)
+ return r.handleCreateCluster(ctx, kc, l)
case models.UpdatingEvent:
- return r.handleUpdateCluster(ctx, kafkaConnect, l)
+ return r.handleUpdateCluster(ctx, kc, l)
case models.DeletingEvent:
- return r.handleDeleteCluster(ctx, kafkaConnect, l)
+ return r.handleDeleteCluster(ctx, kc, l)
default:
- l.Info("Event isn't handled", "cluster name", kafkaConnect.Spec.Name,
- "request", req, "event", kafkaConnect.Annotations[models.ResourceStateAnnotation])
+ l.Info("Event isn't handled", "cluster name", kc.Spec.Name,
+ "request", req, "event", kc.Annotations[models.ResourceStateAnnotation])
return models.ExitReconcile, nil
}
}
@@ -178,6 +186,83 @@ func (r *KafkaConnectReconciler) handleCreateCluster(ctx context.Context, kc *v1
"Cluster status check job is started",
)
}
+ if kc.Spec.OnPremisesSpec != nil {
+ iData, err := r.API.GetKafkaConnect(kc.Status.ID)
+ if err != nil {
+ l.Error(err, "Cannot get cluster from the Instaclustr API",
+ "cluster name", kc.Spec.Name,
+ "data centres", kc.Spec.DataCentres,
+ "cluster ID", kc.Status.ID,
+ )
+ r.EventRecorder.Eventf(
+ kc, models.Warning, models.FetchFailed,
+ "Cluster fetch from the Instaclustr API is failed. Reason: %v",
+ err,
+ )
+ return reconcile.Result{}, err
+ }
+ iKafkaConnect, err := kc.FromInst(iData)
+ if err != nil {
+ l.Error(
+ err, "Cannot convert cluster from the Instaclustr API",
+ "cluster name", kc.Spec.Name,
+ "cluster ID", kc.Status.ID,
+ )
+ r.EventRecorder.Eventf(
+ kc, models.Warning, models.ConversionFailed,
+ "Cluster convertion from the Instaclustr API to k8s resource is failed. Reason: %v",
+ err,
+ )
+ return reconcile.Result{}, err
+ }
+
+ bootstrap := newOnPremisesBootstrap(
+ r.Client,
+ kc,
+ r.EventRecorder,
+ iKafkaConnect.Status.ClusterStatus,
+ kc.Spec.OnPremisesSpec,
+ newExposePorts(kc.GetExposePorts()),
+ kc.GetHeadlessPorts(),
+ kc.Spec.PrivateNetworkCluster,
+ )
+
+ err = handleCreateOnPremisesClusterResources(ctx, bootstrap)
+ if err != nil {
+ l.Error(
+ err, "Cannot create resources for on-premises cluster",
+ "cluster spec", kc.Spec.OnPremisesSpec,
+ )
+ r.EventRecorder.Eventf(
+ kc, models.Warning, models.CreationFailed,
+ "Resources creation for on-premises cluster is failed. Reason: %v",
+ err,
+ )
+ return reconcile.Result{}, err
+ }
+
+ err = r.startClusterOnPremisesIPsJob(kc, bootstrap)
+ if err != nil {
+ l.Error(err, "Cannot start on-premises cluster IPs check job",
+ "cluster ID", kc.Status.ID,
+ )
+
+ r.EventRecorder.Eventf(
+ kc, models.Warning, models.CreationFailed,
+ "On-premises cluster IPs check job is failed. Reason: %v",
+ err,
+ )
+ return reconcile.Result{}, err
+ }
+
+ l.Info(
+ "On-premises resources have been created",
+ "cluster name", kc.Spec.Name,
+ "on-premises Spec", kc.Spec.OnPremisesSpec,
+ "cluster ID", kc.Status.ID,
+ )
+ return models.ExitReconcile, nil
+ }
return models.ExitReconcile, nil
}
@@ -202,7 +287,7 @@ func (r *KafkaConnectReconciler) handleUpdateCluster(ctx context.Context, kc *v1
l.Error(err, "Cannot convert Kafka Connect from Instaclustr",
"ClusterID", kc.Status.ID)
r.EventRecorder.Eventf(
- kc, models.Warning, models.ConvertionFailed,
+ kc, models.Warning, models.ConversionFailed,
"Cluster convertion from the Instaclustr API to k8s resource is failed. Reason: %v",
err,
)
@@ -292,39 +377,39 @@ func (r *KafkaConnectReconciler) handleUpdateCluster(ctx context.Context, kc *v1
return models.ExitReconcile, nil
}
-func (r *KafkaConnectReconciler) handleExternalChanges(k, ik *v1beta1.KafkaConnect, l logr.Logger) (reconcile.Result, error) {
- if !k.Spec.IsEqual(ik.Spec) {
+func (r *KafkaConnectReconciler) handleExternalChanges(kc, ik *v1beta1.KafkaConnect, l logr.Logger) (reconcile.Result, error) {
+ if !kc.Spec.IsEqual(ik.Spec) {
l.Info(msgSpecStillNoMatch,
- "specification of k8s resource", k.Spec,
+ "specification of k8s resource", kc.Spec,
"data from Instaclustr ", ik.Spec)
- msgDiffSpecs, err := createSpecDifferenceMessage(k.Spec, ik.Spec)
+ msgDiffSpecs, err := createSpecDifferenceMessage(kc.Spec, ik.Spec)
if err != nil {
l.Error(err, "Cannot create specification difference message",
- "instaclustr data", ik.Spec, "k8s resource spec", k.Spec)
+ "instaclustr data", ik.Spec, "k8s resource spec", kc.Spec)
return models.ExitReconcile, nil
}
- r.EventRecorder.Eventf(k, models.Warning, models.ExternalChanges, msgDiffSpecs)
+ r.EventRecorder.Eventf(kc, models.Warning, models.ExternalChanges, msgDiffSpecs)
return models.ExitReconcile, nil
}
- patch := k.NewPatch()
+ patch := kc.NewPatch()
- k.Annotations[models.ExternalChangesAnnotation] = ""
+ kc.Annotations[models.ExternalChangesAnnotation] = ""
- err := r.Patch(context.Background(), k, patch)
+ err := r.Patch(context.Background(), kc, patch)
if err != nil {
l.Error(err, "Cannot patch cluster resource",
- "cluster name", k.Spec.Name, "cluster ID", k.Status.ID)
+ "cluster name", kc.Spec.Name, "cluster ID", kc.Status.ID)
- r.EventRecorder.Eventf(k, models.Warning, models.PatchFailed,
+ r.EventRecorder.Eventf(kc, models.Warning, models.PatchFailed,
"Cluster resource patch is failed. Reason: %v", err)
return reconcile.Result{}, err
}
- l.Info("External changes have been reconciled", "resource ID", k.Status.ID)
- r.EventRecorder.Event(k, models.Normal, models.ExternalChanges, "External changes have been reconciled")
+ l.Info("External changes have been reconciled", "resource ID", kc.Status.ID)
+ r.EventRecorder.Event(kc, models.Normal, models.ExternalChanges, "External changes have been reconciled")
return models.ExitReconcile, nil
}
@@ -390,6 +475,23 @@ func (r *KafkaConnectReconciler) handleDeleteCluster(ctx context.Context, kc *v1
return models.ExitReconcile, nil
}
+
+ if kc.Spec.OnPremisesSpec != nil {
+ err = deleteOnPremResources(ctx, r.Client, kc.Status.ID, kc.Namespace)
+ if err != nil {
+ l.Error(err, "Cannot delete cluster on-premises resources",
+ "cluster ID", kc.Status.ID)
+ r.EventRecorder.Eventf(kc, models.Warning, models.DeletionFailed,
+ "Cluster on-premises resources deletion is failed. Reason: %v", err)
+ return reconcile.Result{}, err
+ }
+
+ l.Info("Cluster on-premises resources are deleted",
+ "cluster ID", kc.Status.ID)
+ r.EventRecorder.Eventf(kc, models.Normal, models.Deleted,
+ "Cluster on-premises resources are deleted")
+ r.Scheduler.RemoveJob(kc.GetJobID(scheduler.OnPremisesIPsChecker))
+ }
}
err = deleteDefaultUserSecret(ctx, r.Client, client.ObjectKeyFromObject(kc))
@@ -472,6 +574,17 @@ func (r *KafkaConnectReconciler) createDefaultSecret(ctx context.Context, kc *v1
return nil
}
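+// startClusterOnPremisesIPsJob schedules the periodic job that verifies the on-premises
+// node IPs against the cluster's pods and expose services.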
+func (r *KafkaConnectReconciler) startClusterOnPremisesIPsJob(k *v1beta1.KafkaConnect, b *onPremisesBootstrap) error {
+ job := newWatchOnPremisesIPsJob(k.Kind, b)
+
+ err := r.Scheduler.ScheduleJob(k.GetJobID(scheduler.OnPremisesIPsChecker), scheduler.ClusterStatusInterval, job)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
func (r *KafkaConnectReconciler) startClusterStatusJob(kc *v1beta1.KafkaConnect) error {
job := r.newWatchStatusJob(kc)
diff --git a/controllers/clusters/on_premises.go b/controllers/clusters/on_premises.go
new file mode 100644
index 000000000..78ca937cc
--- /dev/null
+++ b/controllers/clusters/on_premises.go
@@ -0,0 +1,861 @@
+/*
+Copyright 2022.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clusters
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ k8scorev1 "k8s.io/api/core/v1"
+ k8serrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/intstr"
+ "k8s.io/client-go/tools/record"
+ virtcorev1 "kubevirt.io/api/core/v1"
+ cdiv1beta1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+ "sigs.k8s.io/controller-runtime/pkg/log"
+
+ "github.com/instaclustr/operator/apis/clusters/v1beta1"
+ "github.com/instaclustr/operator/pkg/models"
+ "github.com/instaclustr/operator/pkg/scheduler"
+)
+
+type onPremisesBootstrap struct {
+ K8sClient client.Client
+ K8sObject client.Object
+ EventRecorder record.EventRecorder
+ ClusterStatus v1beta1.ClusterStatus
+ OnPremisesSpec *v1beta1.OnPremisesSpec
+ ExposePorts []k8scorev1.ServicePort
+ HeadlessPorts []k8scorev1.ServicePort
+ PrivateNetworkCluster bool
+}
+
+func newOnPremisesBootstrap(
+ k8sClient client.Client,
+ o client.Object,
+ e record.EventRecorder,
+ status v1beta1.ClusterStatus,
+ onPremisesSpec *v1beta1.OnPremisesSpec,
+ exposePorts,
+ headlessPorts []k8scorev1.ServicePort,
+ privateNetworkCluster bool,
+) *onPremisesBootstrap {
+ return &onPremisesBootstrap{
+ K8sClient: k8sClient,
+ K8sObject: o,
+ EventRecorder: e,
+ ClusterStatus: status,
+ OnPremisesSpec: onPremisesSpec,
+ ExposePorts: exposePorts,
+ HeadlessPorts: headlessPorts,
+ PrivateNetworkCluster: privateNetworkCluster,
+ }
+}
+
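+// handleCreateOnPremisesClusterResources provisions the KubeVirt resources for an on-premises
+// cluster: an SSH gateway for private network clusters and the per-node VMs, disks and services.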
+func handleCreateOnPremisesClusterResources(ctx context.Context, b *onPremisesBootstrap) error {
+ if len(b.ClusterStatus.DataCentres) < 1 {
+ return fmt.Errorf("datacenter ID is empty")
+ }
+
+ if b.PrivateNetworkCluster {
+ err := reconcileSSHGatewayResources(ctx, b)
+ if err != nil {
+ return err
+ }
+ }
+
+ err := reconcileNodesResources(ctx, b)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
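+// reconcileSSHGatewayResources creates the SSH gateway OS DataVolume, VirtualMachine
+// and expose Service in the first data centre if they do not exist yet.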
+func reconcileSSHGatewayResources(ctx context.Context, b *onPremisesBootstrap) error {
+ gatewayDVSize, err := resource.ParseQuantity(b.OnPremisesSpec.OSDiskSize)
+ if err != nil {
+ return err
+ }
+
+ gatewayDVName := fmt.Sprintf("%s-%s", models.GatewayDVPrefix, strings.ToLower(b.K8sObject.GetName()))
+ gatewayDV, err := createDV(
+ ctx,
+ b,
+ gatewayDVName,
+ b.ClusterStatus.DataCentres[0].ID,
+ gatewayDVSize,
+ true,
+ )
+ if err != nil {
+ return err
+ }
+
+ gatewayCPU := resource.Quantity{}
+ gatewayCPU.Set(b.OnPremisesSpec.SSHGatewayCPU)
+
+ gatewayMemory, err := resource.ParseQuantity(b.OnPremisesSpec.SSHGatewayMemory)
+ if err != nil {
+ return err
+ }
+
+ gatewayName := fmt.Sprintf("%s-%s", models.GatewayVMPrefix, strings.ToLower(b.K8sObject.GetName()))
+
+ gatewayVM := &virtcorev1.VirtualMachine{}
+ err = b.K8sClient.Get(ctx, types.NamespacedName{
+ Namespace: b.K8sObject.GetNamespace(),
+ Name: gatewayName,
+ }, gatewayVM)
+ if client.IgnoreNotFound(err) != nil {
+ return err
+ }
+ if k8serrors.IsNotFound(err) {
+ gatewayVM, err = newVM(
+ ctx,
+ b,
+ gatewayName,
+ b.ClusterStatus.DataCentres[0].ID,
+ models.GatewayRack,
+ gatewayDV.Name,
+ gatewayCPU,
+ gatewayMemory)
+ if err != nil {
+ return err
+ }
+ err = b.K8sClient.Create(ctx, gatewayVM)
+ if err != nil {
+ return err
+ }
+ }
+
+ gatewaySvcName := fmt.Sprintf("%s-%s", models.GatewaySvcPrefix, gatewayName)
+ gatewayExposeService := &k8scorev1.Service{}
+ err = b.K8sClient.Get(ctx, types.NamespacedName{
+ Namespace: b.K8sObject.GetNamespace(),
+ Name: gatewaySvcName,
+ }, gatewayExposeService)
+
+ if client.IgnoreNotFound(err) != nil {
+ return err
+ }
+ if k8serrors.IsNotFound(err) {
+ gatewayExposeService = newExposeService(
+ b,
+ gatewaySvcName,
+ gatewayName,
+ b.ClusterStatus.DataCentres[0].ID,
+ )
+ err = b.K8sClient.Create(ctx, gatewayExposeService)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
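+// reconcileNodesResources creates, for every node of the first data centre, the OS and data
+// DataVolumes and the VirtualMachine, plus a per-node expose Service for non-private clusters
+// and the cluster-wide headless Service.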
+func reconcileNodesResources(ctx context.Context, b *onPremisesBootstrap) error {
+ for i, node := range b.ClusterStatus.DataCentres[0].Nodes {
+ nodeOSDiskSize, err := resource.ParseQuantity(b.OnPremisesSpec.OSDiskSize)
+ if err != nil {
+ return err
+ }
+
+ nodeOSDiskDVName := fmt.Sprintf("%s-%d-%s", models.NodeOSDVPrefix, i, strings.ToLower(b.K8sObject.GetName()))
+ nodeOSDV, err := createDV(
+ ctx,
+ b,
+ nodeOSDiskDVName,
+ node.ID,
+ nodeOSDiskSize,
+ true,
+ )
+ if err != nil {
+ return err
+ }
+
+ nodeDataDiskDVSize, err := resource.ParseQuantity(b.OnPremisesSpec.DataDiskSize)
+ if err != nil {
+ return err
+ }
+
+ nodeDataDiskDVName := fmt.Sprintf("%s-%d-%s", models.NodeDVPrefix, i, strings.ToLower(b.K8sObject.GetName()))
+ nodeDataDV, err := createDV(
+ ctx,
+ b,
+ nodeDataDiskDVName,
+ node.ID,
+ nodeDataDiskDVSize,
+ false,
+ )
+ if err != nil {
+ return err
+ }
+
+ nodeCPU := resource.Quantity{}
+ nodeCPU.Set(b.OnPremisesSpec.NodeCPU)
+
+ nodeMemory, err := resource.ParseQuantity(b.OnPremisesSpec.NodeMemory)
+ if err != nil {
+ return err
+ }
+
+ nodeName := fmt.Sprintf("%s-%d-%s", models.NodeVMPrefix, i, strings.ToLower(b.K8sObject.GetName()))
+
+ nodeVM := &virtcorev1.VirtualMachine{}
+ err = b.K8sClient.Get(ctx, types.NamespacedName{
+ Namespace: b.K8sObject.GetNamespace(),
+ Name: nodeName,
+ }, nodeVM)
+ if client.IgnoreNotFound(err) != nil {
+ return err
+ }
+ if k8serrors.IsNotFound(err) {
+ nodeVM, err = newVM(
+ ctx,
+ b,
+ nodeName,
+ node.ID,
+ node.Rack,
+ nodeOSDV.Name,
+ nodeCPU,
+ nodeMemory,
+ nodeDataDV.Name,
+ )
+ if err != nil {
+ return err
+ }
+ err = b.K8sClient.Create(ctx, nodeVM)
+ if err != nil {
+ return err
+ }
+ }
+
+ if !b.PrivateNetworkCluster {
+ nodeExposeName := fmt.Sprintf("%s-%s", models.NodeSvcPrefix, nodeName)
+ nodeExposeService := &k8scorev1.Service{}
+ err = b.K8sClient.Get(ctx, types.NamespacedName{
+ Namespace: b.K8sObject.GetNamespace(),
+ Name: nodeExposeName,
+ }, nodeExposeService)
+ if client.IgnoreNotFound(err) != nil {
+ return err
+ }
+ if k8serrors.IsNotFound(err) {
+ nodeExposeService = newExposeService(
+ b,
+ nodeExposeName,
+ nodeName,
+ node.ID,
+ )
+ err = b.K8sClient.Create(ctx, nodeExposeService)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ headlessServiceName := fmt.Sprintf("%s-%s", models.KubevirtSubdomain, strings.ToLower(b.K8sObject.GetName()))
+ headlessSVC := &k8scorev1.Service{}
+ err = b.K8sClient.Get(ctx, types.NamespacedName{
+ Namespace: b.K8sObject.GetNamespace(),
+ Name: headlessServiceName,
+ }, headlessSVC)
+
+ if client.IgnoreNotFound(err) != nil {
+ return err
+ }
+ if k8serrors.IsNotFound(err) {
+ headlessSVC = newHeadlessService(
+ b,
+ headlessServiceName,
+ )
+ err = b.K8sClient.Create(ctx, headlessSVC)
+ if err != nil {
+ return err
+ }
+ }
+
+ }
+ return nil
+}
+
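+// createDV fetches the DataVolume with the given name and creates it if it is not found.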
+func createDV(
+ ctx context.Context,
+ b *onPremisesBootstrap,
+ name,
+ nodeID string,
+ size resource.Quantity,
+ isOSDisk bool,
+) (*cdiv1beta1.DataVolume, error) {
+ dv := &cdiv1beta1.DataVolume{}
+ err := b.K8sClient.Get(ctx, types.NamespacedName{
+ Namespace: b.K8sObject.GetNamespace(),
+ Name: name,
+ }, dv)
+ if client.IgnoreNotFound(err) != nil {
+ return nil, err
+ }
+ if k8serrors.IsNotFound(err) {
+ dv = newDataDiskDV(
+ b,
+ name,
+ nodeID,
+ size,
+ isOSDisk,
+ )
+ err = b.K8sClient.Create(ctx, dv)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return dv, nil
+}
+
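+// newDataDiskDV builds a DataVolume spec: OS disks are imported from the on-premises
+// OS image URL, while data disks start as blank images.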
+func newDataDiskDV(
+ b *onPremisesBootstrap,
+ name,
+ nodeID string,
+ storageSize resource.Quantity,
+ isOSDisk bool,
+) *cdiv1beta1.DataVolume {
+ dvSource := &cdiv1beta1.DataVolumeSource{}
+
+ if isOSDisk {
+ dvSource.HTTP = &cdiv1beta1.DataVolumeSourceHTTP{URL: b.OnPremisesSpec.OSImageURL}
+ } else {
+ dvSource.Blank = &cdiv1beta1.DataVolumeBlankImage{}
+ }
+
+ return &cdiv1beta1.DataVolume{
+ TypeMeta: metav1.TypeMeta{
+ Kind: models.DVKind,
+ APIVersion: models.CDIKubevirtV1beta1APIVersion,
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: b.K8sObject.GetNamespace(),
+ Labels: map[string]string{
+ models.ClusterIDLabel: b.ClusterStatus.ID,
+ models.NodeIDLabel: nodeID,
+ },
+ Finalizers: []string{models.DeletionFinalizer},
+ },
+ Spec: cdiv1beta1.DataVolumeSpec{
+ Source: dvSource,
+ PVC: &k8scorev1.PersistentVolumeClaimSpec{
+ AccessModes: []k8scorev1.PersistentVolumeAccessMode{
+ k8scorev1.ReadWriteOnce,
+ },
+ Resources: k8scorev1.ResourceRequirements{
+ Requests: k8scorev1.ResourceList{
+ models.Storage: storageSize,
+ },
+ },
+ StorageClassName: &b.OnPremisesSpec.StorageClassName,
+ },
+ },
+ }
+}
+
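+// newVM builds a VirtualMachine that boots from the given OS DataVolume, mounts the referenced
+// cloud-init script secret (after verifying it exists), and attaches any additional data disk
+// DataVolumes passed in storageDVNames.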
+func newVM(
+ ctx context.Context,
+ b *onPremisesBootstrap,
+ vmName,
+ nodeID,
+ nodeRack,
+ OSDiskDVName string,
+ cpu,
+ memory resource.Quantity,
+ storageDVNames ...string,
+) (*virtcorev1.VirtualMachine, error) {
+ runStrategy := virtcorev1.RunStrategyAlways
+ bootOrder1 := uint(1)
+
+ cloudInitSecret := &k8scorev1.Secret{}
+ err := b.K8sClient.Get(ctx, types.NamespacedName{
+ Namespace: b.OnPremisesSpec.CloudInitScriptRef.Namespace,
+ Name: b.OnPremisesSpec.CloudInitScriptRef.Name,
+ }, cloudInitSecret)
+ if err != nil {
+ return nil, err
+ }
+
+ labelSet := map[string]string{
+ models.ClusterIDLabel: b.ClusterStatus.ID,
+ models.NodeIDLabel: nodeID,
+ models.NodeRackLabel: nodeRack,
+ models.KubevirtDomainLabel: vmName,
+ }
+
+ if nodeRack != models.GatewayRack {
+ labelSet[models.NodeLabel] = models.WorkerNode
+ }
+
+ vm := &virtcorev1.VirtualMachine{
+ TypeMeta: metav1.TypeMeta{
+ Kind: models.VirtualMachineKind,
+ APIVersion: models.KubevirtV1APIVersion,
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: vmName,
+ Namespace: b.K8sObject.GetNamespace(),
+ Labels: labelSet,
+ Finalizers: []string{models.DeletionFinalizer},
+ },
+ Spec: virtcorev1.VirtualMachineSpec{
+ RunStrategy: &runStrategy,
+ Template: &virtcorev1.VirtualMachineInstanceTemplateSpec{
+ ObjectMeta: metav1.ObjectMeta{
+ Labels: labelSet,
+ },
+ Spec: virtcorev1.VirtualMachineInstanceSpec{
+ Hostname: vmName,
+ Subdomain: fmt.Sprintf("%s-%s", models.KubevirtSubdomain, b.K8sObject.GetName()),
+ Domain: virtcorev1.DomainSpec{
+ Resources: virtcorev1.ResourceRequirements{
+ Requests: k8scorev1.ResourceList{
+ models.CPU: cpu,
+ models.Memory: memory,
+ },
+ },
+ Devices: virtcorev1.Devices{
+ Disks: []virtcorev1.Disk{
+ {
+ Name: models.Boot,
+ BootOrder: &bootOrder1,
+ IO: models.Native,
+ Cache: models.None,
+ DiskDevice: virtcorev1.DiskDevice{
+ Disk: &virtcorev1.DiskTarget{
+ Bus: models.Virtio,
+ },
+ },
+ },
+ {
+ Name: models.CloudInit,
+ DiskDevice: virtcorev1.DiskDevice{},
+ Cache: models.None,
+ },
+ },
+ Interfaces: []virtcorev1.Interface{
+ {
+ Name: models.Default,
+ InterfaceBindingMethod: virtcorev1.InterfaceBindingMethod{
+ Bridge: &virtcorev1.InterfaceBridge{},
+ },
+ },
+ },
+ },
+ },
+ Volumes: []virtcorev1.Volume{
+ {
+ Name: models.Boot,
+ VolumeSource: virtcorev1.VolumeSource{
+ PersistentVolumeClaim: &virtcorev1.PersistentVolumeClaimVolumeSource{
+ PersistentVolumeClaimVolumeSource: k8scorev1.PersistentVolumeClaimVolumeSource{
+ ClaimName: OSDiskDVName,
+ },
+ },
+ },
+ },
+ {
+ Name: models.CloudInit,
+ VolumeSource: virtcorev1.VolumeSource{
+ CloudInitNoCloud: &virtcorev1.CloudInitNoCloudSource{
+ UserDataSecretRef: &k8scorev1.LocalObjectReference{
+ Name: b.OnPremisesSpec.CloudInitScriptRef.Name,
+ },
+ },
+ },
+ },
+ },
+ Networks: []virtcorev1.Network{
+ {
+ Name: models.Default,
+ NetworkSource: virtcorev1.NetworkSource{
+ Pod: &virtcorev1.PodNetwork{},
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+
+ for i, dvName := range storageDVNames {
+ diskName := fmt.Sprintf("%s-%d-%s", models.DataDisk, i, vm.Name)
+
+ vm.Spec.Template.Spec.Domain.Devices.Disks = append(vm.Spec.Template.Spec.Domain.Devices.Disks, virtcorev1.Disk{
+ Name: diskName,
+ IO: models.Native,
+ Cache: models.None,
+ DiskDevice: virtcorev1.DiskDevice{
+ Disk: &virtcorev1.DiskTarget{
+ Bus: models.Virtio,
+ },
+ },
+ Serial: models.DataDiskSerial,
+ })
+
+ vm.Spec.Template.Spec.Volumes = append(vm.Spec.Template.Spec.Volumes, virtcorev1.Volume{
+ Name: diskName,
+ VolumeSource: virtcorev1.VolumeSource{
+ PersistentVolumeClaim: &virtcorev1.PersistentVolumeClaimVolumeSource{
+ PersistentVolumeClaimVolumeSource: k8scorev1.PersistentVolumeClaimVolumeSource{
+ ClaimName: dvName,
+ },
+ },
+ },
+ })
+ }
+
+ return vm, nil
+}
+
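+// newExposeService builds a LoadBalancer Service that exposes a single node VM
+// on the cluster's expose ports.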
+func newExposeService(
+ b *onPremisesBootstrap,
+ svcName,
+ vmName,
+ nodeID string,
+) *k8scorev1.Service {
+ return &k8scorev1.Service{
+ TypeMeta: metav1.TypeMeta{
+ Kind: models.ServiceKind,
+ APIVersion: models.K8sAPIVersionV1,
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: svcName,
+ Namespace: b.K8sObject.GetNamespace(),
+ Labels: map[string]string{
+ models.ClusterIDLabel: b.ClusterStatus.ID,
+ models.NodeIDLabel: nodeID,
+ },
+ Finalizers: []string{models.DeletionFinalizer},
+ },
+ Spec: k8scorev1.ServiceSpec{
+ Ports: b.ExposePorts,
+ Selector: map[string]string{
+ models.KubevirtDomainLabel: vmName,
+ models.NodeIDLabel: nodeID,
+ },
+ Type: models.LBType,
+ },
+ }
+}
+
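+// newHeadlessService builds the headless Service used for discovery across
+// all worker node VMs of the cluster.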
+func newHeadlessService(
+ b *onPremisesBootstrap,
+ svcName string,
+) *k8scorev1.Service {
+ return &k8scorev1.Service{
+ TypeMeta: metav1.TypeMeta{
+ Kind: models.ServiceKind,
+ APIVersion: models.K8sAPIVersionV1,
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: svcName,
+ Namespace: b.K8sObject.GetNamespace(),
+ Labels: map[string]string{
+ models.ClusterIDLabel: b.ClusterStatus.ID,
+ },
+ Finalizers: []string{models.DeletionFinalizer},
+ },
+ Spec: k8scorev1.ServiceSpec{
+ ClusterIP: "None",
+ Ports: b.HeadlessPorts,
+ Selector: map[string]string{
+ models.ClusterIDLabel: b.ClusterStatus.ID,
+ models.NodeLabel: models.WorkerNode,
+ },
+ },
+ }
+}
+
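+// deleteOnPremResources deletes all VirtualMachines, VirtualMachineInstances, DataVolumes
+// and Services labelled with the given cluster ID and removes their deletion finalizers.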
+func deleteOnPremResources(ctx context.Context, K8sClient client.Client, clusterID, ns string) error {
+ vms := &virtcorev1.VirtualMachineList{}
+ err := K8sClient.List(ctx, vms, &client.ListOptions{
+ LabelSelector: labels.SelectorFromSet(map[string]string{
+ models.ClusterIDLabel: clusterID,
+ }),
+ Namespace: ns,
+ })
+ if err != nil {
+ return err
+ }
+
+ for _, vm := range vms.Items {
+ err = K8sClient.Delete(ctx, &vm)
+ if err != nil {
+ return err
+ }
+
+ patch := client.MergeFrom(vm.DeepCopy())
+ controllerutil.RemoveFinalizer(&vm, models.DeletionFinalizer)
+ err = K8sClient.Patch(ctx, &vm, patch)
+ if err != nil {
+ return err
+ }
+ }
+
+ vmis := &virtcorev1.VirtualMachineInstanceList{}
+ err = K8sClient.List(ctx, vmis, &client.ListOptions{
+ LabelSelector: labels.SelectorFromSet(map[string]string{
+ models.ClusterIDLabel: clusterID,
+ }),
+ Namespace: ns,
+ })
+ if err != nil {
+ return err
+ }
+
+ for _, vmi := range vmis.Items {
+ err = K8sClient.Delete(ctx, &vmi)
+ if err != nil {
+ return err
+ }
+
+ patch := client.MergeFrom(vmi.DeepCopy())
+ controllerutil.RemoveFinalizer(&vmi, models.DeletionFinalizer)
+ err = K8sClient.Patch(ctx, &vmi, patch)
+ if err != nil {
+ return err
+ }
+ }
+
+ dvs := &cdiv1beta1.DataVolumeList{}
+ err = K8sClient.List(ctx, dvs, &client.ListOptions{
+ LabelSelector: labels.SelectorFromSet(map[string]string{
+ models.ClusterIDLabel: clusterID,
+ }),
+ Namespace: ns,
+ })
+ if err != nil {
+ return err
+ }
+
+ for _, dv := range dvs.Items {
+ err = K8sClient.Delete(ctx, &dv)
+ if err != nil {
+ return err
+ }
+
+ patch := client.MergeFrom(dv.DeepCopy())
+ controllerutil.RemoveFinalizer(&dv, models.DeletionFinalizer)
+ err = K8sClient.Patch(ctx, &dv, patch)
+ if err != nil {
+ return err
+ }
+ }
+
+ svcs := &k8scorev1.ServiceList{}
+ err = K8sClient.List(ctx, svcs, &client.ListOptions{
+ LabelSelector: labels.SelectorFromSet(map[string]string{
+ models.ClusterIDLabel: clusterID,
+ }),
+ Namespace: ns,
+ })
+ if err != nil {
+ return err
+ }
+
+ for _, svc := range svcs.Items {
+ err = K8sClient.Delete(ctx, &svc)
+ if err != nil {
+ return err
+ }
+
+ patch := client.MergeFrom(svc.DeepCopy())
+ controllerutil.RemoveFinalizer(&svc, models.DeletionFinalizer)
+ err = K8sClient.Patch(ctx, &svc, patch)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
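+// newExposePorts prepends the SSH port to the cluster-specific expose ports.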
+func newExposePorts(sp []k8scorev1.ServicePort) []k8scorev1.ServicePort {
+ var ports []k8scorev1.ServicePort
+ ports = []k8scorev1.ServicePort{{
+ Name: models.SSH,
+ Port: models.Port22,
+ TargetPort: intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: models.Port22,
+ },
+ },
+ }
+
+ ports = append(ports, sp...)
+
+ return ports
+}
+
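+// newWatchOnPremisesIPsJob returns a scheduler job that checks that every worker pod IP still
+// matches its node's private address and, for non-private clusters, that the expose service
+// load balancer IPs still match the nodes' public addresses.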
+func newWatchOnPremisesIPsJob(kind string, b *onPremisesBootstrap) scheduler.Job {
+ l := log.Log.WithValues("component", fmt.Sprintf("%sOnPremisesIPsCheckerJob", kind))
+
+ return func() error {
+ allNodePods := &k8scorev1.PodList{}
+ err := b.K8sClient.List(context.Background(), allNodePods, &client.ListOptions{
+ LabelSelector: labels.SelectorFromSet(map[string]string{
+ models.ClusterIDLabel: b.ClusterStatus.ID,
+ models.NodeLabel: models.WorkerNode,
+ }),
+ Namespace: b.K8sObject.GetNamespace(),
+ })
+ if err != nil {
+ l.Error(err, "Cannot get on-premises cluster pods",
+ "cluster name", b.K8sObject.GetName(),
+ "clusterID", b.ClusterStatus.ID,
+ )
+
+ b.EventRecorder.Eventf(
+ b.K8sObject, models.Warning, models.CreationFailed,
+ "Fetching on-premises cluster pods is failed. Reason: %v",
+ err,
+ )
+ return err
+ }
+
+ l.Info("allNodePODS", "allNodePODS", allNodePods.Items)
+ l.Info("Nodes", "Nodes", b.ClusterStatus.DataCentres[0].Nodes)
+
+ if len(allNodePods.Items) != len(b.ClusterStatus.DataCentres[0].Nodes) {
+ err = fmt.Errorf("the quantity of pods does not match the number of on-premises nodes")
+ l.Error(err, "Cannot compare private IPs for the cluster",
+ "cluster name", b.K8sObject.GetName(),
+ "clusterID", b.ClusterStatus.ID,
+ )
+
+ b.EventRecorder.Eventf(
+ b.K8sObject, models.Warning, models.CreationFailed,
+ "Comparing of cluster's private IPs is failing. Reason: %v",
+ err,
+ )
+ return err
+ }
+
+ for _, node := range b.ClusterStatus.DataCentres[0].Nodes {
+ nodePods := &k8scorev1.PodList{}
+ err = b.K8sClient.List(context.Background(), nodePods, &client.ListOptions{
+ LabelSelector: labels.SelectorFromSet(map[string]string{
+ models.ClusterIDLabel: b.ClusterStatus.ID,
+ models.NodeIDLabel: node.ID,
+ }),
+ Namespace: b.K8sObject.GetNamespace(),
+ })
+ if err != nil {
+ l.Error(err, "Cannot get on-premises cluster pods",
+ "cluster name", b.K8sObject.GetName(),
+ "clusterID", b.ClusterStatus.ID,
+ )
+
+ b.EventRecorder.Eventf(
+ b.K8sObject, models.Warning, models.CreationFailed,
+ "Fetching on-premises cluster pods is failed. Reason: %v",
+ err,
+ )
+ return err
+ }
+
+ l.Info("nodePODS", "nodePODS", nodePods.Items)
+
+ for _, pod := range nodePods.Items {
+ l.Info("podIP", "podIP", pod.Status.PodIP)
+ l.Info("node.PrivateAddress", "node.PrivateAddress", node.PrivateAddress)
+
+ if (pod.Status.PodIP != "" && node.PrivateAddress != "") &&
+ (pod.Status.PodIP != node.PrivateAddress) {
+
+ err = fmt.Errorf("private IPs was changed")
+ l.Error(err, "Node's private IP addresses are not equal",
+ "cluster name", b.K8sObject.GetName(),
+ "clusterID", b.ClusterStatus.ID,
+ "nodeID", node.ID,
+ "nodeIP", node.PrivateAddress,
+ "podIP", pod.Status.PodIP,
+ )
+
+ b.EventRecorder.Eventf(
+ b.K8sObject, models.Warning, models.CreationFailed,
+ "The private IP addresses of the node are not matching. Reason: %v",
+ err,
+ )
+ return err
+ }
+ }
+
+ if !b.PrivateNetworkCluster {
+ nodeSVCs := &k8scorev1.ServiceList{}
+ err = b.K8sClient.List(context.Background(), nodeSVCs, &client.ListOptions{
+ LabelSelector: labels.SelectorFromSet(map[string]string{
+ models.ClusterIDLabel: b.ClusterStatus.ID,
+ models.NodeIDLabel: node.ID,
+ }),
+ Namespace: b.K8sObject.GetNamespace(),
+ })
+ if err != nil {
+ l.Error(err, "Cannot get services backed by on-premises cluster pods",
+ "cluster name", b.K8sObject.GetName(),
+ "clusterID", b.ClusterStatus.ID,
+ )
+
+ b.EventRecorder.Eventf(
+ b.K8sObject, models.Warning, models.CreationFailed,
+ "Fetching services backed by on-premises cluster pods is failed. Reason: %v",
+ err,
+ )
+ return err
+ }
+ for _, svc := range nodeSVCs.Items {
+ // Skip services whose load balancer has not been assigned an ingress IP yet.
+ if len(svc.Status.LoadBalancer.Ingress) == 0 {
+ continue
+ }
+ l.Info("svcIP", "svcIP", svc.Status.LoadBalancer.Ingress[0].IP)
+ l.Info("node.PublicAddress", "node.PublicAddress", node.PublicAddress)
+
+ if (svc.Status.LoadBalancer.Ingress[0].IP != "" && node.PublicAddress != "") &&
+ (svc.Status.LoadBalancer.Ingress[0].IP != node.PublicAddress) {
+
+ err = fmt.Errorf("public IPs was changed")
+ l.Error(err, "Node's public IP addresses are not equal",
+ "cluster name", b.K8sObject.GetName(),
+ "clusterID", b.ClusterStatus.ID,
+ "nodeID", node.ID,
+ "nodeIP", node.PrivateAddress,
+ "svcIP", svc.Status.LoadBalancer.Ingress[0].IP,
+ )
+
+ b.EventRecorder.Eventf(
+ b.K8sObject, models.Warning, models.CreationFailed,
+ "The public IP addresses of the node are not matching. Reason: %v",
+ err,
+ )
+ return err
+ }
+ }
+ }
+ }
+ return nil
+ }
+}
diff --git a/controllers/clusters/opensearch_controller.go b/controllers/clusters/opensearch_controller.go
index b34807a30..516510897 100644
--- a/controllers/clusters/opensearch_controller.go
+++ b/controllers/clusters/opensearch_controller.go
@@ -276,7 +276,7 @@ func (r *OpenSearchReconciler) HandleUpdateCluster(
"cluster ID", o.Status.ID,
)
- r.EventRecorder.Eventf(o, models.Warning, models.ConvertionFailed,
+ r.EventRecorder.Eventf(o, models.Warning, models.ConversionFailed,
"Cluster convertion from the Instaclustr API to k8s resource is failed. Reason: %v", err)
return reconcile.Result{}, err
diff --git a/controllers/clusters/postgresql_controller.go b/controllers/clusters/postgresql_controller.go
index 046f3f383..39c04d91a 100644
--- a/controllers/clusters/postgresql_controller.go
+++ b/controllers/clusters/postgresql_controller.go
@@ -62,9 +62,16 @@ type PostgreSQLReconciler struct {
//+kubebuilder:rbac:groups=clusters.instaclustr.com,resources=postgresqls/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=clusters.instaclustr.com,resources=postgresqls/finalizers,verbs=update
//+kubebuilder:rbac:groups=clusterresources.instaclustr.com,resources=clusterbackups,verbs=get;list;create;update;patch;deletecollection;delete
-//+kubebuilder:rbac:groups="",resources=secrets,verbs=get;watch;create;delete;update
//+kubebuilder:rbac:groups="",resources=events,verbs=create;patch
//+kubebuilder:rbac:groups="",resources=nodes,verbs=get;watch;list
+//+kubebuilder:rbac:groups=cdi.kubevirt.io,resources=datavolumes,verbs=get;list;watch;create;update;patch;delete;deletecollection
+//+kubebuilder:rbac:groups=kubevirt.io,resources=virtualmachines,verbs=get;list;watch;create;update;patch;delete;deletecollection
+//+kubebuilder:rbac:groups=kubevirt.io,resources=virtualmachineinstances,verbs=get;list;watch;create;update;patch;delete;deletecollection
+//+kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete;deletecollection
+//+kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete;deletecollection
+//+kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch;create;update;patch;delete
+//+kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete
+//+kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch;delete;deletecollection
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
@@ -72,19 +79,19 @@ type PostgreSQLReconciler struct {
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.13.0/pkg/reconcile
func (r *PostgreSQLReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
- logger := log.FromContext(ctx)
+ l := log.FromContext(ctx)
pg := &v1beta1.PostgreSQL{}
err := r.Client.Get(ctx, req.NamespacedName, pg)
if err != nil {
if k8serrors.IsNotFound(err) {
- logger.Info("PostgreSQL custom resource is not found",
+ l.Info("PostgreSQL custom resource is not found",
"resource name", req.NamespacedName,
)
return models.ExitReconcile, nil
}
- logger.Error(err, "Unable to fetch PostgreSQL cluster",
+ l.Error(err, "Unable to fetch PostgreSQL cluster",
"resource name", req.NamespacedName,
)
@@ -93,22 +100,22 @@ func (r *PostgreSQLReconciler) Reconcile(ctx context.Context, req ctrl.Request)
switch pg.Annotations[models.ResourceStateAnnotation] {
case models.CreatingEvent:
- return r.handleCreateCluster(ctx, pg, logger)
+ return r.handleCreateCluster(ctx, pg, l)
case models.UpdatingEvent:
- return r.handleUpdateCluster(ctx, pg, logger)
+ return r.handleUpdateCluster(ctx, pg, l)
case models.DeletingEvent:
- return r.handleDeleteCluster(ctx, pg, logger)
+ return r.handleDeleteCluster(ctx, pg, l)
case models.SecretEvent:
- return r.handleUpdateDefaultUserPassword(ctx, pg, logger)
+ return r.handleUpdateDefaultUserPassword(ctx, pg, l)
case models.GenericEvent:
- logger.Info("PostgreSQL resource generic event isn't handled",
+ l.Info("PostgreSQL resource generic event isn't handled",
"cluster name", pg.Spec.Name,
"request", req,
"event", pg.Annotations[models.ResourceStateAnnotation],
)
return models.ExitReconcile, nil
default:
- logger.Info("PostgreSQL resource event isn't handled",
+ l.Info("PostgreSQL resource event isn't handled",
"cluster name", pg.Spec.Name,
"request", req,
"event", pg.Annotations[models.ResourceStateAnnotation],
@@ -120,23 +127,23 @@ func (r *PostgreSQLReconciler) Reconcile(ctx context.Context, req ctrl.Request)
func (r *PostgreSQLReconciler) handleCreateCluster(
ctx context.Context,
pg *v1beta1.PostgreSQL,
- logger logr.Logger,
+ l logr.Logger,
) (reconcile.Result, error) {
- logger = logger.WithName("PostgreSQL creation event")
+ l = l.WithName("PostgreSQL creation event")
var err error
patch := pg.NewPatch()
if pg.Status.ID == "" {
if pg.Spec.HasRestore() {
- logger.Info(
+ l.Info(
"Creating PostgreSQL cluster from backup",
"original cluster ID", pg.Spec.PgRestoreFrom.ClusterID,
)
pg.Status.ID, err = r.API.RestoreCluster(pg.RestoreInfoToInstAPI(pg.Spec.PgRestoreFrom), models.PgRestoreValue)
if err != nil {
- logger.Error(err, "Cannot restore PostgreSQL cluster from backup",
+ l.Error(err, "Cannot restore PostgreSQL cluster from backup",
"original cluster ID", pg.Spec.PgRestoreFrom.ClusterID,
)
@@ -156,7 +163,7 @@ func (r *PostgreSQLReconciler) handleCreateCluster(
pg.Status.ID,
)
} else {
- logger.Info(
+ l.Info(
"Creating PostgreSQL cluster",
"cluster name", pg.Spec.Name,
"data centres", pg.Spec.DataCentres,
@@ -166,7 +173,7 @@ func (r *PostgreSQLReconciler) handleCreateCluster(
pg.Status.ID, err = r.API.CreateCluster(instaclustr.PGSQLEndpoint, pgSpec)
if err != nil {
- logger.Error(
+ l.Error(
err, "Cannot create PostgreSQL cluster",
"spec", pg.Spec,
)
@@ -189,7 +196,7 @@ func (r *PostgreSQLReconciler) handleCreateCluster(
err = r.Status().Patch(ctx, pg, patch)
if err != nil {
- logger.Error(err, "Cannot patch PostgreSQL resource status",
+ l.Error(err, "Cannot patch PostgreSQL resource status",
"cluster name", pg.Spec.Name,
"status", pg.Status,
)
@@ -203,7 +210,7 @@ func (r *PostgreSQLReconciler) handleCreateCluster(
return reconcile.Result{}, err
}
- logger.Info(
+ l.Info(
"PostgreSQL resource has been created",
"cluster name", pg.Name,
"cluster ID", pg.Status.ID,
@@ -218,7 +225,7 @@ func (r *PostgreSQLReconciler) handleCreateCluster(
pg.Annotations[models.ResourceStateAnnotation] = models.CreatedEvent
err = r.Patch(ctx, pg, patch)
if err != nil {
- logger.Error(err, "Cannot patch PostgreSQL resource",
+ l.Error(err, "Cannot patch PostgreSQL resource",
"cluster name", pg.Spec.Name,
"status", pg.Status)
@@ -232,7 +239,7 @@ func (r *PostgreSQLReconciler) handleCreateCluster(
if pg.Status.State != models.DeletedStatus {
err = r.startClusterStatusJob(pg)
if err != nil {
- logger.Error(err, "Cannot start PostgreSQL cluster status check job",
+ l.Error(err, "Cannot start PostgreSQL cluster status check job",
"cluster ID", pg.Status.ID,
)
@@ -249,9 +256,87 @@ func (r *PostgreSQLReconciler) handleCreateCluster(
"Cluster status check job is started",
)
+ if pg.Spec.OnPremisesSpec != nil {
+ iData, err := r.API.GetPostgreSQL(pg.Status.ID)
+ if err != nil {
+ l.Error(err, "Cannot get cluster from the Instaclustr API",
+ "cluster name", pg.Spec.Name,
+ "data centres", pg.Spec.DataCentres,
+ "cluster ID", pg.Status.ID,
+ )
+ r.EventRecorder.Eventf(
+ pg, models.Warning, models.FetchFailed,
+ "Cluster fetch from the Instaclustr API is failed. Reason: %v",
+ err,
+ )
+ return reconcile.Result{}, err
+ }
+ iPostgreSQL, err := pg.FromInstAPI(iData)
+ if err != nil {
+ l.Error(
+ err, "Cannot convert cluster from the Instaclustr API",
+ "cluster name", pg.Spec.Name,
+ "cluster ID", pg.Status.ID,
+ )
+ r.EventRecorder.Eventf(
+ pg, models.Warning, models.ConversionFailed,
+ "Cluster convertion from the Instaclustr API to k8s resource is failed. Reason: %v",
+ err,
+ )
+ return reconcile.Result{}, err
+ }
+
+ bootstrap := newOnPremisesBootstrap(
+ r.Client,
+ pg,
+ r.EventRecorder,
+ iPostgreSQL.Status.ClusterStatus,
+ pg.Spec.OnPremisesSpec,
+ newExposePorts(pg.GetExposePorts()),
+ pg.GetHeadlessPorts(),
+ pg.Spec.PrivateNetworkCluster,
+ )
+
+ err = handleCreateOnPremisesClusterResources(ctx, bootstrap)
+ if err != nil {
+ l.Error(
+ err, "Cannot create resources for on-premises cluster",
+ "cluster spec", pg.Spec.OnPremisesSpec,
+ )
+ r.EventRecorder.Eventf(
+ pg, models.Warning, models.CreationFailed,
+ "Resources creation for on-premises cluster is failed. Reason: %v",
+ err,
+ )
+ return reconcile.Result{}, err
+ }
+
+ err = r.startClusterOnPremisesIPsJob(pg, bootstrap)
+ if err != nil {
+ l.Error(err, "Cannot start on-premises cluster IPs check job",
+ "cluster ID", pg.Status.ID,
+ )
+
+ r.EventRecorder.Eventf(
+ pg, models.Warning, models.CreationFailed,
+ "On-premises cluster IPs check job is failed. Reason: %v",
+ err,
+ )
+ return reconcile.Result{}, err
+ }
+
+ l.Info(
+ "On-premises resources have been created",
+ "cluster name", pg.Spec.Name,
+ "on-premises Spec", pg.Spec.OnPremisesSpec,
+ "cluster ID", pg.Status.ID,
+ )
+ return models.ExitReconcile, nil
+ }
+
err = r.startClusterBackupsJob(pg)
if err != nil {
- logger.Error(err, "Cannot start PostgreSQL cluster backups check job",
+ l.Error(err, "Cannot start PostgreSQL cluster backups check job",
"cluster ID", pg.Status.ID,
)
@@ -271,7 +356,7 @@ func (r *PostgreSQLReconciler) handleCreateCluster(
if pg.Spec.UserRefs != nil {
err = r.startUsersCreationJob(pg)
if err != nil {
- logger.Error(err, "Failed to start user PostreSQL creation job")
+ l.Error(err, "Failed to start user PostreSQL creation job")
r.EventRecorder.Eventf(pg, models.Warning, models.CreationFailed,
"User creation job is failed. Reason: %v", err)
return reconcile.Result{}, err
@@ -282,9 +367,9 @@ func (r *PostgreSQLReconciler) handleCreateCluster(
}
}
- err = r.createDefaultPassword(ctx, pg, logger)
+ err = r.createDefaultPassword(ctx, pg, l)
if err != nil {
- logger.Error(err, "Cannot create default password for PostgreSQL",
+ l.Error(err, "Cannot create default password for PostgreSQL",
"cluster name", pg.Spec.Name,
"clusterID", pg.Status.ID,
)
@@ -304,13 +389,13 @@ func (r *PostgreSQLReconciler) handleCreateCluster(
func (r *PostgreSQLReconciler) handleUpdateCluster(
ctx context.Context,
pg *v1beta1.PostgreSQL,
- logger logr.Logger,
+ l logr.Logger,
) (reconcile.Result, error) {
- logger = logger.WithName("PostgreSQL update event")
+ l = l.WithName("PostgreSQL update event")
iData, err := r.API.GetPostgreSQL(pg.Status.ID)
if err != nil {
- logger.Error(
+ l.Error(
err, "Cannot get PostgreSQL cluster status from the Instaclustr API",
"cluster name", pg.Spec.Name,
"cluster ID", pg.Status.ID,
@@ -326,14 +411,14 @@ func (r *PostgreSQLReconciler) handleUpdateCluster(
iPg, err := pg.FromInstAPI(iData)
if err != nil {
- logger.Error(
+ l.Error(
err, "Cannot convert PostgreSQL cluster status from the Instaclustr API",
"cluster name", pg.Spec.Name,
"cluster ID", pg.Status.ID,
)
r.EventRecorder.Eventf(
- pg, models.Warning, models.ConvertionFailed,
+ pg, models.Warning, models.ConversionFailed,
"Cluster convertion from the Instaclustr API to k8s resource is failed. Reason: %v",
err,
)
@@ -341,7 +426,7 @@ func (r *PostgreSQLReconciler) handleUpdateCluster(
}
if iPg.Status.CurrentClusterOperationStatus != models.NoOperation {
- logger.Info("PostgreSQL cluster is not ready to update",
+ l.Info("PostgreSQL cluster is not ready to update",
"cluster name", pg.Spec.Name,
"cluster status", iPg.Status.State,
"current operation status", iPg.Status.CurrentClusterOperationStatus,
@@ -350,7 +435,7 @@ func (r *PostgreSQLReconciler) handleUpdateCluster(
pg.Annotations[models.UpdateQueuedAnnotation] = models.True
err = r.Patch(ctx, pg, patch)
if err != nil {
- logger.Error(err, "Cannot patch cluster resource",
+ l.Error(err, "Cannot patch cluster resource",
"cluster name", pg.Spec.Name, "cluster ID", pg.Status.ID)
r.EventRecorder.Eventf(
@@ -364,17 +449,17 @@ func (r *PostgreSQLReconciler) handleUpdateCluster(
}
if pg.Annotations[models.ExternalChangesAnnotation] == models.True {
- return r.handleExternalChanges(pg, iPg, logger)
+ return r.handleExternalChanges(pg, iPg, l)
}
if pg.Spec.ClusterSettingsNeedUpdate(iPg.Spec.Cluster) {
- logger.Info("Updating cluster settings",
+ l.Info("Updating cluster settings",
"instaclustr description", iPg.Spec.Description,
"instaclustr two factor delete", iPg.Spec.TwoFactorDelete)
err = r.API.UpdateClusterSettings(pg.Status.ID, pg.Spec.ClusterSettingsUpdateToInstAPI())
if err != nil {
- logger.Error(err, "Cannot update cluster settings",
+ l.Error(err, "Cannot update cluster settings",
"cluster ID", pg.Status.ID, "cluster spec", pg.Spec)
r.EventRecorder.Eventf(pg, models.Warning, models.UpdateFailed,
"Cannot update cluster settings. Reason: %v", err)
@@ -384,14 +469,14 @@ func (r *PostgreSQLReconciler) handleUpdateCluster(
}
if !pg.Spec.AreDCsEqual(iPg.Spec.DataCentres) {
- logger.Info("Update request to Instaclustr API has been sent",
+ l.Info("Update request to Instaclustr API has been sent",
"spec data centres", pg.Spec.DataCentres,
"resize settings", pg.Spec.ResizeSettings,
)
err = r.updateCluster(pg)
if err != nil {
- logger.Error(err, "Cannot update Data Centres",
+ l.Error(err, "Cannot update Data Centres",
"cluster name", pg.Spec.Name,
)
@@ -405,7 +490,7 @@ func (r *PostgreSQLReconciler) handleUpdateCluster(
pg.Annotations[models.UpdateQueuedAnnotation] = models.True
err = r.Patch(ctx, pg, patch)
if err != nil {
- logger.Error(err, "Cannot patch PostgreSQL metadata",
+ l.Error(err, "Cannot patch PostgreSQL metadata",
"cluster name", pg.Spec.Name,
"cluster metadata", pg.ObjectMeta,
)
@@ -420,7 +505,7 @@ func (r *PostgreSQLReconciler) handleUpdateCluster(
return reconcile.Result{}, err
}
- logger.Info(
+ l.Info(
"Cluster has been updated",
"cluster name", pg.Spec.Name,
"cluster ID", pg.Status.ID,
@@ -430,7 +515,7 @@ func (r *PostgreSQLReconciler) handleUpdateCluster(
iConfigs, err := r.API.GetPostgreSQLConfigs(pg.Status.ID)
if err != nil {
- logger.Error(err, "Cannot get PostgreSQL cluster configs",
+ l.Error(err, "Cannot get PostgreSQL cluster configs",
"cluster name", pg.Spec.Name,
"clusterID", pg.Status.ID,
)
@@ -449,7 +534,7 @@ func (r *PostgreSQLReconciler) handleUpdateCluster(
pg.Spec.ClusterConfigurations,
iConfig.ConfigurationProperties)
if err != nil {
- logger.Error(err, "Cannot reconcile PostgreSQL cluster configs",
+ l.Error(err, "Cannot reconcile PostgreSQL cluster configs",
"cluster name", pg.Spec.Name,
"clusterID", pg.Status.ID,
"configs", pg.Spec.ClusterConfigurations,
@@ -464,16 +549,16 @@ func (r *PostgreSQLReconciler) handleUpdateCluster(
return reconcile.Result{}, err
}
- logger.Info("PostgreSQL cluster configurations were updated",
+ l.Info("PostgreSQL cluster configurations were updated",
"cluster name", pg.Spec.Name,
)
}
pg.Annotations[models.ResourceStateAnnotation] = models.UpdatedEvent
pg.Annotations[models.UpdateQueuedAnnotation] = ""
- err = r.patchClusterMetadata(ctx, pg, logger)
+ err = r.patchClusterMetadata(ctx, pg, l)
if err != nil {
- logger.Error(err, "Cannot patch PostgreSQL resource metadata",
+ l.Error(err, "Cannot patch PostgreSQL resource metadata",
"cluster name", pg.Spec.Name,
"cluster metadata", pg.ObjectMeta,
)
@@ -486,7 +571,7 @@ func (r *PostgreSQLReconciler) handleUpdateCluster(
return reconcile.Result{}, err
}
- logger.Info("PostgreSQL cluster was updated",
+ l.Info("PostgreSQL cluster was updated",
"cluster name", pg.Spec.Name,
"cluster status", pg.Status.State,
)
@@ -790,13 +875,13 @@ func (r *PostgreSQLReconciler) handleExternalChanges(pg, iPg *v1beta1.PostgreSQL
func (r *PostgreSQLReconciler) handleDeleteCluster(
ctx context.Context,
pg *v1beta1.PostgreSQL,
- logger logr.Logger,
+ l logr.Logger,
) (reconcile.Result, error) {
- logger = logger.WithName("PostgreSQL deletion event")
+ l = l.WithName("PostgreSQL deletion event")
_, err := r.API.GetPostgreSQL(pg.Status.ID)
if err != nil && !errors.Is(err, instaclustr.NotFound) {
- logger.Error(err, "Cannot get PostgreSQL cluster status",
+ l.Error(err, "Cannot get PostgreSQL cluster status",
"cluster name", pg.Spec.Name,
"cluster ID", pg.Status.ID,
)
@@ -810,13 +895,13 @@ func (r *PostgreSQLReconciler) handleDeleteCluster(
}
if !errors.Is(err, instaclustr.NotFound) {
- logger.Info("Sending cluster deletion to the Instaclustr API",
+ l.Info("Sending cluster deletion to the Instaclustr API",
"cluster name", pg.Spec.Name,
"cluster ID", pg.Status.ID)
err = r.API.DeleteCluster(pg.Status.ID, instaclustr.PGSQLEndpoint)
if err != nil {
- logger.Error(err, "Cannot delete PostgreSQL cluster",
+ l.Error(err, "Cannot delete PostgreSQL cluster",
"cluster name", pg.Spec.Name,
"cluster status", pg.Status.State,
)
@@ -839,7 +924,7 @@ func (r *PostgreSQLReconciler) handleDeleteCluster(
pg.Annotations[models.ClusterDeletionAnnotation] = models.Triggered
err = r.Patch(ctx, pg, patch)
if err != nil {
- logger.Error(err, "Cannot patch cluster resource",
+ l.Error(err, "Cannot patch cluster resource",
"cluster name", pg.Spec.Name,
"cluster state", pg.Status.State)
r.EventRecorder.Eventf(pg, models.Warning, models.PatchFailed,
@@ -849,26 +934,43 @@ func (r *PostgreSQLReconciler) handleDeleteCluster(
return reconcile.Result{}, err
}
- logger.Info(msgDeleteClusterWithTwoFactorDelete, "cluster ID", pg.Status.ID)
+ l.Info(msgDeleteClusterWithTwoFactorDelete, "cluster ID", pg.Status.ID)
r.EventRecorder.Event(pg, models.Normal, models.DeletionStarted,
"Two-Factor Delete is enabled, please confirm cluster deletion via email or phone.")
return models.ExitReconcile, nil
}
+
+ if pg.Spec.OnPremisesSpec != nil {
+ err = deleteOnPremResources(ctx, r.Client, pg.Status.ID, pg.Namespace)
+ if err != nil {
+ l.Error(err, "Cannot delete cluster on-premises resources",
+ "cluster ID", pg.Status.ID)
+ r.EventRecorder.Eventf(pg, models.Warning, models.DeletionFailed,
+ "Cluster on-premises resources deletion is failed. Reason: %v", err)
+ return reconcile.Result{}, err
+ }
+
+ l.Info("Cluster on-premises resources are deleted",
+ "cluster ID", pg.Status.ID)
+ r.EventRecorder.Eventf(pg, models.Normal, models.Deleted,
+ "Cluster on-premises resources are deleted")
+ r.Scheduler.RemoveJob(pg.GetJobID(scheduler.OnPremisesIPsChecker))
+ }
}
- logger.Info("PostgreSQL cluster is being deleted. Deleting PostgreSQL default user secret",
+ l.Info("PostgreSQL cluster is being deleted. Deleting PostgreSQL default user secret",
"cluster ID", pg.Status.ID,
)
- logger.Info("Deleting cluster backup resources",
+ l.Info("Deleting cluster backup resources",
"cluster ID", pg.Status.ID,
)
err = r.deleteBackups(ctx, pg.Status.ID, pg.Namespace)
if err != nil {
- logger.Error(err, "Cannot delete PostgreSQL backup resources",
+ l.Error(err, "Cannot delete PostgreSQL backup resources",
"cluster ID", pg.Status.ID,
)
r.EventRecorder.Eventf(
@@ -879,7 +981,7 @@ func (r *PostgreSQLReconciler) handleDeleteCluster(
return reconcile.Result{}, err
}
- logger.Info("Cluster backup resources were deleted",
+ l.Info("Cluster backup resources were deleted",
"cluster ID", pg.Status.ID,
)
@@ -893,7 +995,7 @@ func (r *PostgreSQLReconciler) handleDeleteCluster(
r.Scheduler.RemoveJob(pg.GetJobID(scheduler.StatusChecker))
for _, ref := range pg.Spec.UserRefs {
- err = r.handleUsersDetach(ctx, logger, pg, ref)
+ err = r.handleUsersDetach(ctx, l, pg, ref)
if err != nil {
return reconcile.Result{}, err
}
@@ -901,9 +1003,9 @@ func (r *PostgreSQLReconciler) handleDeleteCluster(
controllerutil.RemoveFinalizer(pg, models.DeletionFinalizer)
pg.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent
- err = r.patchClusterMetadata(ctx, pg, logger)
+ err = r.patchClusterMetadata(ctx, pg, l)
if err != nil {
- logger.Error(
+ l.Error(
err, "Cannot patch PostgreSQL resource metadata after finalizer removal",
"cluster name", pg.Spec.Name,
"cluster ID", pg.Status.ID,
@@ -919,7 +1021,7 @@ func (r *PostgreSQLReconciler) handleDeleteCluster(
err = r.deleteSecret(ctx, pg)
if client.IgnoreNotFound(err) != nil {
- logger.Error(err, "Cannot delete PostgreSQL default user secret",
+ l.Error(err, "Cannot delete PostgreSQL default user secret",
"cluster ID", pg.Status.ID,
)
@@ -931,7 +1033,7 @@ func (r *PostgreSQLReconciler) handleDeleteCluster(
return reconcile.Result{}, err
}
- logger.Info("Cluster PostgreSQL default user secret was deleted",
+ l.Info("Cluster PostgreSQL default user secret was deleted",
"cluster ID", pg.Status.ID,
)
@@ -943,7 +1045,7 @@ func (r *PostgreSQLReconciler) handleDeleteCluster(
err = exposeservice.Delete(r.Client, pg.Name, pg.Namespace)
if err != nil {
- logger.Error(err, "Cannot delete PostgreSQL cluster expose service",
+ l.Error(err, "Cannot delete PostgreSQL cluster expose service",
"cluster ID", pg.Status.ID,
"cluster name", pg.Spec.Name,
)
@@ -951,7 +1053,7 @@ func (r *PostgreSQLReconciler) handleDeleteCluster(
return reconcile.Result{}, err
}
- logger.Info("PostgreSQL cluster was deleted",
+ l.Info("PostgreSQL cluster was deleted",
"cluster name", pg.Spec.Name,
"cluster ID", pg.Status.ID,
)
@@ -967,13 +1069,13 @@ func (r *PostgreSQLReconciler) handleDeleteCluster(
func (r *PostgreSQLReconciler) handleUpdateDefaultUserPassword(
ctx context.Context,
pg *v1beta1.PostgreSQL,
- logger logr.Logger,
+ l logr.Logger,
) (reconcile.Result, error) {
- logger = logger.WithName("PostgreSQL default user password updating event")
+ l = l.WithName("PostgreSQL default user password updating event")
secret, err := v1beta1.GetDefaultPgUserSecret(ctx, pg.Name, pg.Namespace, r.Client)
if err != nil {
- logger.Error(err, "Cannot get the default secret for the PostgreSQL cluster",
+ l.Error(err, "Cannot get the default secret for the PostgreSQL cluster",
"cluster name", pg.Spec.Name,
"cluster ID", pg.Status.ID,
)
@@ -990,7 +1092,7 @@ func (r *PostgreSQLReconciler) handleUpdateDefaultUserPassword(
password := string(secret.Data[models.Password])
isValid := pg.ValidateDefaultUserPassword(password)
if !isValid {
- logger.Error(err, "Default PostgreSQL user password is not valid. This field must be at least 8 characters long. Must contain characters from at least 3 of the following 4 categories: Uppercase, Lowercase, Numbers, Special Characters",
+ l.Error(err, "Default PostgreSQL user password is not valid. This field must be at least 8 characters long. Must contain characters from at least 3 of the following 4 categories: Uppercase, Lowercase, Numbers, Special Characters",
"cluster name", pg.Spec.Name,
"cluster ID", pg.Status.ID,
)
@@ -1006,7 +1108,7 @@ func (r *PostgreSQLReconciler) handleUpdateDefaultUserPassword(
err = r.API.UpdatePostgreSQLDefaultUserPassword(pg.Status.ID, password)
if err != nil {
- logger.Error(err, "Cannot update default PostgreSQL user password",
+ l.Error(err, "Cannot update default PostgreSQL user password",
"cluster name", pg.Spec.Name,
"cluster ID", pg.Status.ID,
)
@@ -1021,9 +1123,9 @@ func (r *PostgreSQLReconciler) handleUpdateDefaultUserPassword(
}
pg.Annotations[models.ResourceStateAnnotation] = models.UpdatedEvent
- err = r.patchClusterMetadata(ctx, pg, logger)
+ err = r.patchClusterMetadata(ctx, pg, l)
if err != nil {
- logger.Error(err, "Cannot patch PostgreSQL resource metadata",
+ l.Error(err, "Cannot patch PostgreSQL resource metadata",
"cluster name", pg.Spec.Name,
"cluster metadata", pg.ObjectMeta,
)
@@ -1036,7 +1138,7 @@ func (r *PostgreSQLReconciler) handleUpdateDefaultUserPassword(
return reconcile.Result{}, err
}
- logger.Info("PostgreSQL default user password was updated",
+ l.Info("PostgreSQL default user password was updated",
"cluster name", pg.Spec.Name,
"cluster ID", pg.Status.ID,
)
@@ -1049,6 +1151,17 @@ func (r *PostgreSQLReconciler) handleUpdateDefaultUserPassword(
return models.ExitReconcile, nil
}
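+// startClusterOnPremisesIPsJob schedules the periodic job that verifies the on-premises
+// node IPs against the cluster's pods and expose services.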
+func (r *PostgreSQLReconciler) startClusterOnPremisesIPsJob(pg *v1beta1.PostgreSQL, b *onPremisesBootstrap) error {
+ job := newWatchOnPremisesIPsJob(pg.Kind, b)
+
+ err := r.Scheduler.ScheduleJob(pg.GetJobID(scheduler.OnPremisesIPsChecker), scheduler.ClusterStatusInterval, job)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
func (r *PostgreSQLReconciler) startClusterStatusJob(pg *v1beta1.PostgreSQL) error {
job := r.newWatchStatusJob(pg)
@@ -1572,7 +1685,7 @@ func (r *PostgreSQLReconciler) reconcileClusterConfigurations(
func (r *PostgreSQLReconciler) patchClusterMetadata(
ctx context.Context,
pgCluster *v1beta1.PostgreSQL,
- logger logr.Logger,
+ l logr.Logger,
) error {
patchRequest := []*v1beta1.PatchRequest{}
@@ -1610,7 +1723,7 @@ func (r *PostgreSQLReconciler) patchClusterMetadata(
return err
}
- logger.Info("PostgreSQL cluster patched",
+ l.Info("PostgreSQL cluster patched",
"Cluster name", pgCluster.Spec.Name,
"Finalizers", pgCluster.Finalizers,
"Annotations", pgCluster.Annotations,
diff --git a/controllers/clusters/redis_controller.go b/controllers/clusters/redis_controller.go
index e11f6d03f..311e48b25 100644
--- a/controllers/clusters/redis_controller.go
+++ b/controllers/clusters/redis_controller.go
@@ -58,6 +58,14 @@ type RedisReconciler struct {
//+kubebuilder:rbac:groups=clusters.instaclustr.com,resources=redis/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=clusters.instaclustr.com,resources=redis/finalizers,verbs=update
//+kubebuilder:rbac:groups="",resources=events,verbs=create;patch
+//+kubebuilder:rbac:groups=cdi.kubevirt.io,resources=datavolumes,verbs=get;list;watch;create;update;patch;delete;deletecollection
+//+kubebuilder:rbac:groups=kubevirt.io,resources=virtualmachines,verbs=get;list;watch;create;update;patch;delete;deletecollection
+//+kubebuilder:rbac:groups=kubevirt.io,resources=virtualmachineinstances,verbs=get;list;watch;create;update;patch;delete;deletecollection
+//+kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete;deletecollection
+//+kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete;deletecollection
+//+kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch;create;update;patch;delete
+//+kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete
+//+kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch;delete;deletecollection
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
@@ -65,19 +73,19 @@ type RedisReconciler struct {
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.13.0/pkg/reconcile
func (r *RedisReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
- logger := log.FromContext(ctx)
+ l := log.FromContext(ctx)
redis := &v1beta1.Redis{}
err := r.Client.Get(ctx, req.NamespacedName, redis)
if err != nil {
if k8serrors.IsNotFound(err) {
- logger.Info("Redis cluster resource is not found",
+ l.Info("Redis cluster resource is not found",
"resource name", req.NamespacedName,
)
return models.ExitReconcile, nil
}
- logger.Error(err, "Unable to fetch Redis cluster",
+ l.Error(err, "Unable to fetch Redis cluster",
"resource name", req.NamespacedName,
)
return models.ExitReconcile, nil
@@ -85,19 +93,19 @@ func (r *RedisReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl
switch redis.Annotations[models.ResourceStateAnnotation] {
case models.CreatingEvent:
- return r.handleCreateCluster(ctx, redis, logger)
+ return r.handleCreateCluster(ctx, redis, l)
case models.UpdatingEvent:
- return r.handleUpdateCluster(ctx, redis, logger)
+ return r.handleUpdateCluster(ctx, redis, l)
case models.DeletingEvent:
- return r.handleDeleteCluster(ctx, redis, logger)
+ return r.handleDeleteCluster(ctx, redis, l)
case models.GenericEvent:
- logger.Info("Redis generic event isn't handled",
+ l.Info("Redis generic event isn't handled",
"cluster name", redis.Spec.Name,
"request", req,
)
return models.ExitReconcile, nil
default:
- logger.Info("Unknown event isn't handled",
+ l.Info("Unknown event isn't handled",
"cluster name", redis.Spec.Name,
"request", req,
)
@@ -108,20 +116,20 @@ func (r *RedisReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl
func (r *RedisReconciler) handleCreateCluster(
ctx context.Context,
redis *v1beta1.Redis,
- logger logr.Logger,
+ l logr.Logger,
) (reconcile.Result, error) {
var err error
if redis.Status.ID == "" {
var id string
if redis.Spec.HasRestore() {
- logger.Info(
+ l.Info(
"Creating Redis cluster from backup",
"original cluster ID", redis.Spec.RestoreFrom.ClusterID,
)
id, err = r.API.RestoreCluster(redis.RestoreInfoToInstAPI(redis.Spec.RestoreFrom), models.RedisAppKind)
if err != nil {
- logger.Error(
+ l.Error(
err, "Cannot restore Redis cluster from backup",
"original cluster ID", redis.Spec.RestoreFrom.ClusterID,
)
@@ -133,7 +141,7 @@ func (r *RedisReconciler) handleCreateCluster(
return reconcile.Result{}, err
}
- logger.Info(
+ l.Info(
"Redis cluster was created from backup",
"original cluster ID", redis.Spec.RestoreFrom.ClusterID,
)
@@ -145,7 +153,7 @@ func (r *RedisReconciler) handleCreateCluster(
id,
)
} else {
- logger.Info(
+ l.Info(
"Creating Redis cluster",
"cluster name", redis.Spec.Name,
"data centres", redis.Spec.DataCentres,
@@ -153,7 +161,7 @@ func (r *RedisReconciler) handleCreateCluster(
id, err = r.API.CreateCluster(instaclustr.RedisEndpoint, redis.Spec.ToInstAPI())
if err != nil {
- logger.Error(
+ l.Error(
err, "Cannot create Redis cluster",
"cluster manifest", redis.Spec,
)
@@ -165,7 +173,7 @@ func (r *RedisReconciler) handleCreateCluster(
return reconcile.Result{}, err
}
- logger.Info(
+ l.Info(
"Redis cluster was created",
"cluster ID", id,
"cluster name", redis.Spec.Name,
@@ -181,7 +189,7 @@ func (r *RedisReconciler) handleCreateCluster(
redis.Status.ID = id
err = r.Status().Patch(ctx, redis, patch)
if err != nil {
- logger.Error(err, "Cannot patch Redis cluster status",
+ l.Error(err, "Cannot patch Redis cluster status",
"cluster name", redis.Spec.Name)
r.EventRecorder.Eventf(redis, models.Warning, models.PatchFailed,
"Cluster resource status patch is failed. Reason: %v", err)
@@ -189,7 +197,7 @@ func (r *RedisReconciler) handleCreateCluster(
return reconcile.Result{}, err
}
- logger.Info("Redis resource has been created",
+ l.Info("Redis resource has been created",
"cluster name", redis.Name,
"cluster ID", redis.Status.ID,
"api version", redis.APIVersion)
@@ -200,7 +208,7 @@ func (r *RedisReconciler) handleCreateCluster(
redis.Annotations[models.ResourceStateAnnotation] = models.CreatedEvent
err = r.Patch(ctx, redis, patch)
if err != nil {
- logger.Error(err, "Cannot patch Redis cluster",
+ l.Error(err, "Cannot patch Redis cluster",
"cluster name", redis.Spec.Name,
"cluster metadata", redis.ObjectMeta,
)
@@ -215,7 +223,7 @@ func (r *RedisReconciler) handleCreateCluster(
if redis.Status.State != models.DeletedStatus {
err = r.startClusterStatusJob(redis)
if err != nil {
- logger.Error(err, "Cannot start cluster status job",
+ l.Error(err, "Cannot start cluster status job",
"redis cluster ID", redis.Status.ID,
)
@@ -232,9 +240,87 @@ func (r *RedisReconciler) handleCreateCluster(
"Cluster status check job is started",
)
+ if redis.Spec.OnPremisesSpec != nil {
+ iData, err := r.API.GetRedis(redis.Status.ID)
+ if err != nil {
+ l.Error(err, "Cannot get cluster from the Instaclustr API",
+ "cluster name", redis.Spec.Name,
+ "data centres", redis.Spec.DataCentres,
+ "cluster ID", redis.Status.ID,
+ )
+ r.EventRecorder.Eventf(
+ redis, models.Warning, models.FetchFailed,
+ "Cluster fetch from the Instaclustr API is failed. Reason: %v",
+ err,
+ )
+ return reconcile.Result{}, err
+ }
+ iRedis, err := redis.FromInstAPI(iData)
+ if err != nil {
+ l.Error(
+ err, "Cannot convert cluster from the Instaclustr API",
+ "cluster name", redis.Spec.Name,
+ "cluster ID", redis.Status.ID,
+ )
+ r.EventRecorder.Eventf(
+ redis, models.Warning, models.ConversionFailed,
+ "Cluster convertion from the Instaclustr API to k8s resource is failed. Reason: %v",
+ err,
+ )
+ return reconcile.Result{}, err
+ }
+
+ bootstrap := newOnPremisesBootstrap(
+ r.Client,
+ redis,
+ r.EventRecorder,
+ iRedis.Status.ClusterStatus,
+ redis.Spec.OnPremisesSpec,
+ newExposePorts(redis.GetExposePorts()),
+ redis.GetHeadlessPorts(),
+ redis.Spec.PrivateNetworkCluster,
+ )
+
+ err = handleCreateOnPremisesClusterResources(ctx, bootstrap)
+ if err != nil {
+ l.Error(
+ err, "Cannot create resources for on-premises cluster",
+ "cluster spec", redis.Spec.OnPremisesSpec,
+ )
+ r.EventRecorder.Eventf(
+ redis, models.Warning, models.CreationFailed,
+ "Resources creation for on-premises cluster is failed. Reason: %v",
+ err,
+ )
+ return reconcile.Result{}, err
+ }
+
+ err = r.startClusterOnPremisesIPsJob(redis, bootstrap)
+ if err != nil {
+ l.Error(err, "Cannot start on-premises cluster IPs check job",
+ "cluster ID", redis.Status.ID,
+ )
+
+ r.EventRecorder.Eventf(
+ redis, models.Warning, models.CreationFailed,
+ "On-premises cluster IPs check job is failed. Reason: %v",
+ err,
+ )
+ return reconcile.Result{}, err
+ }
+
+ l.Info(
+ "On-premises resources have been created",
+ "cluster name", redis.Spec.Name,
+ "on-premises Spec", redis.Spec.OnPremisesSpec,
+ "cluster ID", redis.Status.ID,
+ )
+ return models.ExitReconcile, nil
+ }
+
err = r.startClusterBackupsJob(redis)
if err != nil {
- logger.Error(err, "Cannot start Redis cluster backups check job",
+ l.Error(err, "Cannot start Redis cluster backups check job",
"cluster ID", redis.Status.ID,
)
@@ -255,7 +341,7 @@ func (r *RedisReconciler) handleCreateCluster(
err = r.startUsersCreationJob(redis)
if err != nil {
- logger.Error(err, "Failed to start user creation job")
+ l.Error(err, "Failed to start user creation job")
r.EventRecorder.Eventf(redis, models.Warning, models.CreationFailed,
"User creation job is failed. Reason: %v", err,
)
@@ -267,7 +353,7 @@ func (r *RedisReconciler) handleCreateCluster(
}
}
- logger.Info(
+ l.Info(
"Redis resource has been created",
"cluster name", redis.Name,
"cluster ID", redis.Status.ID,
@@ -293,11 +379,11 @@ func (r *RedisReconciler) startUsersCreationJob(cluster *v1beta1.Redis) error {
func (r *RedisReconciler) handleUpdateCluster(
ctx context.Context,
redis *v1beta1.Redis,
- logger logr.Logger,
+ l logr.Logger,
) (reconcile.Result, error) {
iData, err := r.API.GetRedis(redis.Status.ID)
if err != nil {
- logger.Error(
+ l.Error(
err, "Cannot get Redis cluster from the Instaclustr API",
"cluster name", redis.Spec.Name,
"cluster ID", redis.Status.ID,
@@ -313,14 +399,14 @@ func (r *RedisReconciler) handleUpdateCluster(
iRedis, err := redis.FromInstAPI(iData)
if err != nil {
- logger.Error(
+ l.Error(
err, "Cannot convert Redis cluster from the Instaclustr API",
"cluster name", redis.Spec.Name,
"cluster ID", redis.Status.ID,
)
r.EventRecorder.Eventf(
- redis, models.Warning, models.ConvertionFailed,
+ redis, models.Warning, models.ConversionFailed,
"Cluster convertion from the Instaclustr API to k8s resource is failed. Reason: %v",
err,
)
@@ -328,17 +414,17 @@ func (r *RedisReconciler) handleUpdateCluster(
}
if redis.Annotations[models.ExternalChangesAnnotation] == models.True {
- return r.handleExternalChanges(redis, iRedis, logger)
+ return r.handleExternalChanges(redis, iRedis, l)
}
if redis.Spec.ClusterSettingsNeedUpdate(iRedis.Spec.Cluster) {
- logger.Info("Updating cluster settings",
+ l.Info("Updating cluster settings",
"instaclustr description", iRedis.Spec.Description,
"instaclustr two factor delete", iRedis.Spec.TwoFactorDelete)
err = r.API.UpdateClusterSettings(redis.Status.ID, redis.Spec.ClusterSettingsUpdateToInstAPI())
if err != nil {
- logger.Error(err, "Cannot update cluster settings",
+ l.Error(err, "Cannot update cluster settings",
"cluster ID", redis.Status.ID, "cluster spec", redis.Spec)
r.EventRecorder.Eventf(redis, models.Warning, models.UpdateFailed,
"Cannot update cluster settings. Reason: %v", err)
@@ -348,14 +434,14 @@ func (r *RedisReconciler) handleUpdateCluster(
}
if !redis.Spec.IsEqual(iRedis.Spec) {
- logger.Info("Update request to Instaclustr API has been sent",
+ l.Info("Update request to Instaclustr API has been sent",
"spec data centres", redis.Spec.DataCentres,
"resize settings", redis.Spec.ResizeSettings,
)
err = r.API.UpdateRedis(redis.Status.ID, redis.Spec.DCsUpdateToInstAPI())
if err != nil {
- logger.Error(err, "Cannot update Redis cluster data centres",
+ l.Error(err, "Cannot update Redis cluster data centres",
"cluster name", redis.Spec.Name,
"cluster status", redis.Status,
"data centres", redis.Spec.DataCentres,
@@ -370,7 +456,7 @@ func (r *RedisReconciler) handleUpdateCluster(
patch := redis.NewPatch()
redis.Annotations[models.UpdateQueuedAnnotation] = models.True
if err := r.Patch(ctx, redis, patch); err != nil {
- logger.Error(err, "Cannot patch metadata",
+ l.Error(err, "Cannot patch metadata",
"cluster name", redis.Spec.Name,
"cluster metadata", redis.ObjectMeta,
)
@@ -388,7 +474,7 @@ func (r *RedisReconciler) handleUpdateCluster(
err = handleUsersChanges(ctx, r.Client, r, redis)
if err != nil {
- logger.Error(err, "Failed to handle users changes")
+ l.Error(err, "Failed to handle users changes")
r.EventRecorder.Eventf(redis, models.Warning, models.PatchFailed,
"Handling users changes is failed. Reason: %w", err,
)
@@ -400,7 +486,7 @@ func (r *RedisReconciler) handleUpdateCluster(
redis.Annotations[models.UpdateQueuedAnnotation] = ""
err = r.Patch(ctx, redis, patch)
if err != nil {
- logger.Error(err, "Cannot patch Redis cluster after update",
+ l.Error(err, "Cannot patch Redis cluster after update",
"cluster name", redis.Spec.Name,
"cluster metadata", redis.ObjectMeta,
)
@@ -413,7 +499,7 @@ func (r *RedisReconciler) handleUpdateCluster(
return reconcile.Result{}, err
}
- logger.Info(
+ l.Info(
"Cluster has been updated",
"cluster name", redis.Spec.Name,
"cluster ID", redis.Status.ID,
@@ -463,12 +549,12 @@ func (r *RedisReconciler) handleExternalChanges(redis, iRedis *v1beta1.Redis, l
func (r *RedisReconciler) handleDeleteCluster(
ctx context.Context,
redis *v1beta1.Redis,
- logger logr.Logger,
+ l logr.Logger,
) (reconcile.Result, error) {
_, err := r.API.GetRedis(redis.Status.ID)
if err != nil && !errors.Is(err, instaclustr.NotFound) {
- logger.Error(err, "Cannot get Redis cluster status from Instaclustr",
+ l.Error(err, "Cannot get Redis cluster status from Instaclustr",
"cluster ID", redis.Status.ID,
"cluster name", redis.Spec.Name,
)
@@ -482,13 +568,13 @@ func (r *RedisReconciler) handleDeleteCluster(
}
if !errors.Is(err, instaclustr.NotFound) {
- logger.Info("Sending cluster deletion to the Instaclustr API",
+ l.Info("Sending cluster deletion to the Instaclustr API",
"cluster name", redis.Spec.Name,
"cluster ID", redis.Status.ID)
err = r.API.DeleteCluster(redis.Status.ID, instaclustr.RedisEndpoint)
if err != nil {
- logger.Error(err, "Cannot delete Redis cluster",
+ l.Error(err, "Cannot delete Redis cluster",
"cluster name", redis.Spec.Name,
"cluster status", redis.Status.State,
)
@@ -511,7 +597,7 @@ func (r *RedisReconciler) handleDeleteCluster(
redis.Annotations[models.ClusterDeletionAnnotation] = models.Triggered
err = r.Patch(ctx, redis, patch)
if err != nil {
- logger.Error(err, "Cannot patch cluster resource",
+ l.Error(err, "Cannot patch cluster resource",
"cluster name", redis.Spec.Name,
"cluster state", redis.Status.State)
r.EventRecorder.Eventf(redis, models.Warning, models.PatchFailed,
@@ -521,25 +607,41 @@ func (r *RedisReconciler) handleDeleteCluster(
return reconcile.Result{}, err
}
- logger.Info(msgDeleteClusterWithTwoFactorDelete, "cluster ID", redis.Status.ID)
+ l.Info(msgDeleteClusterWithTwoFactorDelete, "cluster ID", redis.Status.ID)
r.EventRecorder.Event(redis, models.Normal, models.DeletionStarted,
"Two-Factor Delete is enabled, please confirm cluster deletion via email or phone.")
return models.ExitReconcile, nil
}
+ if redis.Spec.OnPremisesSpec != nil {
+ err = deleteOnPremResources(ctx, r.Client, redis.Status.ID, redis.Namespace)
+ if err != nil {
+ l.Error(err, "Cannot delete cluster on-premises resources",
+ "cluster ID", redis.Status.ID)
+ r.EventRecorder.Eventf(redis, models.Warning, models.DeletionFailed,
+ "Cluster on-premises resources deletion is failed. Reason: %v", err)
+ return reconcile.Result{}, err
+ }
+
+ l.Info("Cluster on-premises resources are deleted",
+ "cluster ID", redis.Status.ID)
+ r.EventRecorder.Eventf(redis, models.Normal, models.Deleted,
+ "Cluster on-premises resources are deleted")
+ r.Scheduler.RemoveJob(redis.GetJobID(scheduler.OnPremisesIPsChecker))
+ }
}
r.Scheduler.RemoveJob(redis.GetJobID(scheduler.StatusChecker))
r.Scheduler.RemoveJob(redis.GetJobID(scheduler.BackupsChecker))
- logger.Info("Deleting cluster backup resources",
+ l.Info("Deleting cluster backup resources",
"cluster ID", redis.Status.ID,
)
err = r.deleteBackups(ctx, redis.Status.ID, redis.Namespace)
if err != nil {
- logger.Error(err, "Cannot delete cluster backup resources",
+ l.Error(err, "Cannot delete cluster backup resources",
"cluster ID", redis.Status.ID,
)
r.EventRecorder.Eventf(
@@ -555,13 +657,13 @@ func (r *RedisReconciler) handleDeleteCluster(
"Cluster backup resources are deleted",
)
- logger.Info("Cluster backup resources are deleted",
+ l.Info("Cluster backup resources are deleted",
"cluster ID", redis.Status.ID,
)
err = detachUsers(ctx, r.Client, r, redis)
if err != nil {
- logger.Error(err, "Failed to detach users from the cluster")
+ l.Error(err, "Failed to detach users from the cluster")
r.EventRecorder.Eventf(redis, models.Warning, models.DeletionFailed,
"Detaching users from the cluster is failed. Reason: %w", err,
)
@@ -573,7 +675,7 @@ func (r *RedisReconciler) handleDeleteCluster(
redis.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent
err = r.Patch(ctx, redis, patch)
if err != nil {
- logger.Error(err, "Cannot patch Redis cluster metadata after finalizer removal",
+ l.Error(err, "Cannot patch Redis cluster metadata after finalizer removal",
"cluster name", redis.Spec.Name,
"cluster ID", redis.Status.ID,
)
@@ -588,7 +690,7 @@ func (r *RedisReconciler) handleDeleteCluster(
err = exposeservice.Delete(r.Client, redis.Name, redis.Namespace)
if err != nil {
- logger.Error(err, "Cannot delete Redis cluster expose service",
+ l.Error(err, "Cannot delete Redis cluster expose service",
"cluster ID", redis.Status.ID,
"cluster name", redis.Spec.Name,
)
@@ -596,7 +698,7 @@ func (r *RedisReconciler) handleDeleteCluster(
return reconcile.Result{}, err
}
- logger.Info("Redis cluster was deleted",
+ l.Info("Redis cluster was deleted",
"cluster name", redis.Spec.Name,
"cluster ID", redis.Status.ID,
)
@@ -609,6 +711,17 @@ func (r *RedisReconciler) handleDeleteCluster(
return models.ExitReconcile, nil
}
+func (r *RedisReconciler) startClusterOnPremisesIPsJob(redis *v1beta1.Redis, b *onPremisesBootstrap) error {
+ job := newWatchOnPremisesIPsJob(redis.Kind, b)
+
+ err := r.Scheduler.ScheduleJob(redis.GetJobID(scheduler.OnPremisesIPsChecker), scheduler.ClusterStatusInterval, job)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
func (r *RedisReconciler) startClusterStatusJob(cluster *v1beta1.Redis) error {
job := r.newWatchStatusJob(cluster)
@@ -632,7 +745,7 @@ func (r *RedisReconciler) startClusterBackupsJob(cluster *v1beta1.Redis) error {
}
func (r *RedisReconciler) newUsersCreationJob(redis *v1beta1.Redis) scheduler.Job {
- logger := log.Log.WithValues("component", "redisUsersCreationJob")
+ l := log.Log.WithValues("component", "redisUsersCreationJob")
return func() error {
ctx := context.Background()
@@ -649,7 +762,7 @@ func (r *RedisReconciler) newUsersCreationJob(redis *v1beta1.Redis) scheduler.Jo
}
if redis.Status.State != models.RunningStatus {
- logger.Info("User creation job is scheduled")
+ l.Info("User creation job is scheduled")
r.EventRecorder.Eventf(redis, models.Normal, models.CreationFailed,
"User creation job is scheduled, cluster is not in the running state",
)
@@ -658,14 +771,14 @@ func (r *RedisReconciler) newUsersCreationJob(redis *v1beta1.Redis) scheduler.Jo
err = handleUsersChanges(ctx, r.Client, r, redis)
if err != nil {
- logger.Error(err, "Failed to create users")
+ l.Error(err, "Failed to create users")
r.EventRecorder.Eventf(redis, models.Warning, models.PatchFailed,
"Creating users is failed. Reason: %w", err,
)
return err
}
- logger.Info("User creation job successfully finished")
+ l.Info("User creation job successfully finished")
r.EventRecorder.Eventf(redis, models.Normal, models.Created,
"User creation job successfully finished",
)
diff --git a/doc/clusters/cassandra.md b/doc/clusters/cassandra.md
index 57fd9fbaf..53b334d0b 100644
--- a/doc/clusters/cassandra.md
+++ b/doc/clusters/cassandra.md
@@ -2,22 +2,23 @@
## Available spec fields
-| Field | Type | Description |
-|---------------------------|------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| name | string<br />**required** | Cluster name. Should have length from 3 to 32 symbols. |
-| version | string<br />**required** | Cassandra instance version.<br />**Available versions**: `3.11.15`, `3.11.16`, `4.0.10`, `4.0.11`, `4.1.3`. |
-| pciCompliance | bool<br />**required** | Creates a PCI compliant cluster, see [PCI Compliance](https://www.instaclustr.com/support/documentation/useful-information/pci-compliance/) |
-| description | string<br /> | A description of the cluster |
-| privateNetworkCluster | bool<br />**required** | Creates the cluster with private network only, see [Private Network Clusters](https://www.instaclustr.com/support/documentation/useful-information/private-network-clusters/). |
-| slaTier | string<br />**required** | SLA Tier of the cluster. Non-production clusters may receive lower priority support and reduced SLAs. Production tier is not available when using Developer class nodes. See [SLA Tier](https://www.instaclustr.com/support/documentation/useful-information/sla-tier/) for more information.<br />**Enum**: `PRODUCTION`, `NON_PRODUCTION`. |
-| twoFactorDelete | Array of objects ([TwoFactorDelete](#TwoFactorDeleteObject))<br />_mutable_ | Contacts that will be contacted when cluster request is sent. |
-| schemaRegistry | Array of objects ([KafkaSchemaRegistryDetails](#KafkaSchemaRegistryDetailsObject))<br />_mutable_ | Adds the specified version of Kafka Schema Registry to this Kafka cluster. |
-| luceneEnabled | bool<br />**required** | Adds Apache Lucene to the Cassandra cluster. |
-| passwordAndUserAuth | bool<br />**required** | Enables Password Authentication and User Authorization. |
-| bundledUseOnly | bool<br />**required** | Provision this cluster for Bundled Use only. |
-| restoreFrom | Object ([CassandraRestoreFrom](#CassandraRestoreFromObject)) | Triggers a restore cluster operation. |
-| dataCentres | Array of objects ([CassandraDataCentre](#CassandraDataCentreObject))<br />**required** | Object fields are described below as a bulleted list. |
-| resizeSettings | Array of objects ([ResizeSettings](#ResizeSettingsObject))<br />_mutable_ | Settings to determine how resize requests will be performed for the cluster. |
+| Field | Type | Description |
+|-----------------------|-----------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| name | string<br />**required** | Cluster name. Should have length from 3 to 32 symbols. |
+| version | string<br />**required** | Cassandra instance version.<br />**Available versions**: `3.11.15`, `3.11.16`, `4.0.10`, `4.0.11`, `4.1.3`. |
+| pciCompliance | bool<br />**required** | Creates a PCI compliant cluster, see [PCI Compliance](https://www.instaclustr.com/support/documentation/useful-information/pci-compliance/) |
+| description | string<br /> | A description of the cluster |
+| privateNetworkCluster | bool<br />**required** | Creates the cluster with private network only, see [Private Network Clusters](https://www.instaclustr.com/support/documentation/useful-information/private-network-clusters/). |
+| slaTier | string<br />**required** | SLA Tier of the cluster. Non-production clusters may receive lower priority support and reduced SLAs. Production tier is not available when using Developer class nodes. See [SLA Tier](https://www.instaclustr.com/support/documentation/useful-information/sla-tier/) for more information.<br />**Enum**: `PRODUCTION`, `NON_PRODUCTION`. |
+| twoFactorDelete | Array of objects ([TwoFactorDelete](#TwoFactorDeleteObject))<br />_mutable_ | Contacts that will be contacted when cluster request is sent. |
+| schemaRegistry | Array of objects ([KafkaSchemaRegistryDetails](#KafkaSchemaRegistryDetailsObject))<br />_mutable_ | Adds the specified version of Kafka Schema Registry to this Kafka cluster. |
+| luceneEnabled | bool<br />**required** | Adds Apache Lucene to the Cassandra cluster. |
+| passwordAndUserAuth | bool<br />**required** | Enables Password Authentication and User Authorization. |
+| bundledUseOnly | bool<br />**required** | Provision this cluster for Bundled Use only. |
+| restoreFrom | Object ([CassandraRestoreFrom](#CassandraRestoreFromObject)) | Triggers a restore cluster operation. |
+| dataCentres | Array of objects ([CassandraDataCentre](#CassandraDataCentreObject))<br />**required** | Object fields are described below as a bulleted list. |
+| resizeSettings | Array of objects ([ResizeSettings](#ResizeSettingsObject))<br />_mutable_ | Settings to determine how resize requests will be performed for the cluster. |
+| onPremisesSpec | Object ([OnPremisesSpec](#OnPremisesSpecObject)) | Specifies settings to provision an on-premises cluster inside the K8s cluster. |
### TwoFactorDeleteObject
| Field | Type | Description |
@@ -80,6 +81,27 @@
 | customVpcId | string | Custom VPC ID to which the restored cluster will be allocated.<br />Either restoreToSameVpc or customVpcId must be provided. |
| customVpcNetwork | string | CIDR block in which the cluster will be allocated for a custom VPC. |
+### OnPremisesSpecObject
+
+| Field | Type | Description |
+|--------------------|------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| storageClassName | string<br />**required** | Name of the storage class that will be used to provision disks for on-premises nodes. |
+| osDiskSize | string<br />**required** | Disk size on which the OS will be installed. |
+| dataDiskSize | string<br />**required** | Disk size on which the on-premises cluster data will be stored. |
+| sshGatewayCPU | int64 | Amount of CPU that will be dedicated to provision the SSH Gateway node (only for private clusters). |
+| sshGatewayMemory | string | Amount of RAM that will be dedicated to provision the SSH Gateway node (only for private clusters). |
+| nodeCPU | int64<br />**required** | Amount of CPU that will be dedicated to provision the on-premises worker node. |
+| nodeMemory | string<br />**required** | Amount of RAM that will be dedicated to provision the on-premises worker node. |
+| osImageURL | string<br />**required** | OS image URL that will be used to dynamically provision disks with a preinstalled OS (more info can be found [here](https://kubevirt.io/2020/KubeVirt-VM-Image-Usage-Patterns.html)). |
+| cloudInitScriptRef | Object ([Reference](#ReferenceObject))<br />**required** | Reference to the secret with the cloud-init script (must be located in the same namespace as the cluster). An example can be found [here](#CloudInitScript). |
+
+### ReferenceObject
+
+| Field | Type | Description |
+|-----------|--------|---------------------------------------------------|
+| name | string | Name of the cloud-init secret. |
+| namespace | string | Namespace in which the cloud-init secret is located. |
+
## Cluster create flow
To create a Cassandra cluster instance you need to prepare the yaml manifest. Here is an example:
@@ -111,6 +133,82 @@ spec:
slaTier: "NON_PRODUCTION"
```
+Or if you want to create an on-premises cluster:
+```yaml
+# cassandra.yaml
+apiVersion: clusters.instaclustr.com/v1beta1
+kind: Cassandra
+metadata:
+ name: cassandra-on-prem-cluster
+spec:
+ name: "cassandra-on-prem-cluster"
+ version: "4.0.10"
+ privateNetworkCluster: true
+ onPremisesSpec:
+ storageClassName: managed-csi-premium
+ osDiskSize: 20Gi
+ dataDiskSize: 200Gi
+ sshGatewayCPU: 2
+ sshGatewayMemory: 4096Mi
+ nodeCPU: 2
+ nodeMemory: 8192Mi
+ osImageURL: "https://s3.amazonaws.com/debian-bucket/debian-11-generic-amd64-20230601-1398.raw"
+ cloudInitScriptRef:
+ namespace: default
+ name: cloud-init-secret
+ dataCentres:
+ - name: "onPremCassandra"
+ region: "CLIENT_DC" # Don't change if you want to run on-premises
+ cloudProvider: "ONPREMISES" # Don't change if you want to run on-premises
+ continuousBackup: false
+ nodesNumber: 2
+ replicationFactor: 2
+ privateIpBroadcastForDiscovery: false
+ network: "192.168.0.0/16"
+ tags:
+ "onprem": "test"
+ clientToClusterEncryption: false
+ nodeSize: "CAS-PRD-OP.4.8-200"
+ pciCompliance: false
+ luceneEnabled: false
+ passwordAndUserAuth: false
+ slaTier: "NON_PRODUCTION"
+```
+
+Note that the cloud-init script secret must be created before the cluster manifest is applied.
+
+### CloudInitScript
+
+cloud-init.sh:
+```shell
+#!/bin/bash
+
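+# Example values: replace the password and SSH keys with your own.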
+export NEW_PASS="qwerty12345"
+export SSH_PUB_KEY=""
+export BOOTSTRAP_SSH_KEY="ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAEAQDgeaO3zkY5v1dww3fFONPzUnEgIqJ4kUK0Usu8iFdp+TWIulw9dDeQHa+PdWXP97l5Vv1mG9ipqShEIu7/2bp13KxSblWX4iV1MYZbtimhY3UDOsPn1G3E1Ipis6y+/tosDAm8LoWaGEMcLuE5UjP6gs6K57rCEjkVGjg7vjhIypAMC0J2N2+CxK9o/Y1+LZAec+FL5cmSJoajoo9y/VYJjz/a52jJ93wRafD2uu6ObAl5gkN/+gqY4IJFSMx20sPsIRXdbiBNDqiap56ibHHPKTeZhRbdXvZfjYkHtutXnSM2xn7BjnV8CguISxS3rXlzlzRVYwSUjnKUf5SKBbeyZbCokx271vCeUd5EXfHphvW6FIOna2AI5lpCSYkw5Kho3HaPi2NjXJ9i2dCr1zpcZpCiettDuaEjxR0Cl4Jd6PrAiAEZ0Ns0u2ysVhudshVzQrq6qdd7W9/MLjbDIHdTToNjFLZA6cbE0MQf18LXwJAl+G/QrXgcVaiopUmld+68JL89Xym55LzkMhI0NiDtWufawd/NiZ6jm13Z3+atvnOimdyuqBYeFWgbtcxjs0yN9v7h7PfPh6TbiQYFx37DCfQiIucqx1GWmMijmd7HMY6Nv3UvnoTUTSn4yz1NxhEpC61N+iAInZDpeJEtULSzlEMWlbzL4t5cF+Rm1dFdq3WpZt1mi8F4DgrsgZEuLGAw22RNW3++EWYFUNnJXaYyctPrMpWQktr4DB5nQGIHF92WR8uncxTNUXfWuT29O9e+bFYh1etmq8rsCoLcxN0zFHWvcECK55aE+47lfNAR+HEjuwYW10mGU/pFmO0F9FFmcQRSw4D4tnVUgl3XjKe3bBiTa4lUrzrKkLZ6n9/buW2e7c3jbjmXdPh2R+2Msr/vfuWs9glxQf+CYEbBW6Ye4pekIyI77SaB/bVhaHtXutKxm+QWdNle8aeqiA8Ji1Ml+s75vIg+n5v6viCnl5aV33xHRFpGQJzj2ktsXl9P9d5kgal9eXJYTywC2SnVbZVLb6FGN4kPZTVwX1f+u7v7JCm4YWlbQZtwwiXKjs99AVtQnBWqQvUH5sFUkVXlHA1Y9W6wlup0r+F6URL+7Yw+d0dHByfevrJg3pvmpLb3sEpjIAZodW3dIUReE7Ku3s/q/O9foFnfRBnCcZ2QsnxI5pqNrbrundD1ApOnNXEvICvPXHBBQ44cW0hzAO+WxY5VxyG8y/kXnb48G9efkIQFkNaITJrU9SiOk6bFP4QANdS/pmaSLjJIsHixa+7vmYjRy1SVoQ/39vDUnyCbqKtO56QMH32hQLRO3Vk7NVG6o4dYjFkiaMSaqVlHKMkJQHVzlK2PW9/fjVXfkAHmmhoD debian"
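+# Set the password for the debian and root users.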
+echo "debian:$NEW_PASS" | chpasswd
+echo "root:$NEW_PASS" | sudo chpasswd root
+sudo echo "$SSH_PUB_KEY" > /home/debian/.ssh/authorized_keys
+sudo echo "$BOOTSTRAP_SSH_KEY" >> /home/debian/.ssh/authorized_keys
+sudo chown -R debian: /home/debian/.ssh
+sudo cp /usr/share/doc/apt/examples/sources.list /etc/apt/sources.list
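+# Find the data disk by its DATADISK serial number and format it with ext4.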
+data_device=$(lsblk -dfn -o NAME,SERIAL | awk '$2 == "DATADISK" {print $1}')
+sudo mkfs -t ext4 /dev/"${data_device}"
+```
+
+Create a base64-encoded string from the script using the `cat cloud-init.sh | base64 -w0` command, put the output into the `userdata` field, and create a secret using this yaml manifest:
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+ name: cloud-init-secret
+data:
+  userdata: # paste the base64-encoded script here
+```
+
+Use the `kubectl apply -f cloud-init-secret.yaml` command to create the secret.
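+
+Alternatively, you can let `kubectl` handle the base64 encoding and create an equivalent secret straight from the script file (this assumes the script is saved as `cloud-init.sh`):
+
+```console
+kubectl create secret generic cloud-init-secret --from-file=userdata=cloud-init.sh
+```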
+
Next, you need to apply this manifest in your K8s cluster. This will create a custom resource instance inside (more info about an apply command you can find [here](https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#apply)):
```console
kubectl apply -f cassandra.yaml
diff --git a/main.go b/main.go
index f11e46971..c29ce497b 100644
--- a/main.go
+++ b/main.go
@@ -22,13 +22,12 @@ import (
"time"
"go.uber.org/zap/zapcore"
+ "k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
-
- "k8s.io/apimachinery/pkg/runtime"
- // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
- // to ensure that exec-entrypoint and run can make use of them.
_ "k8s.io/client-go/plugin/pkg/client/auth"
+ virtcorev1 "kubevirt.io/api/core/v1"
+ cdiv1beta1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/log"
@@ -56,6 +55,8 @@ func init() {
utilruntime.Must(clustersv1beta1.AddToScheme(scheme))
utilruntime.Must(clusterresourcesv1beta1.AddToScheme(scheme))
utilruntime.Must(kafkamanagementv1beta1.AddToScheme(scheme))
+ utilruntime.Must(cdiv1beta1.AddToScheme(scheme))
+ utilruntime.Must(virtcorev1.AddToScheme(scheme))
//+kubebuilder:scaffold:scheme
}
diff --git a/pkg/models/on_premises.go b/pkg/models/on_premises.go
new file mode 100644
index 000000000..3d0dbd278
--- /dev/null
+++ b/pkg/models/on_premises.go
@@ -0,0 +1,93 @@
+package models
+
+const (
+ ONPREMISES = "ONPREMISES"
+ CLIENTDC = "CLIENT_DC"
+
+ VirtualMachineKind = "VirtualMachine"
+ DVKind = "DataVolume"
+ ServiceKind = "Service"
+ KubevirtV1APIVersion = "kubevirt.io/v1"
+ CDIKubevirtV1beta1APIVersion = "cdi.kubevirt.io/v1beta1"
+
+ KubevirtSubdomain = "kubevirt"
+ KubevirtDomainLabel = "kubevirt.io/domain"
+ NodeIDLabel = "nodeID"
+ NodeRackLabel = "nodeRack"
+ NodeLabel = "node"
+ NodeOSDVPrefix = "node-os-data-volume-pvc"
+ NodeDVPrefix = "node-data-volume-pvc"
+ NodeVMPrefix = "node-vm"
+ NodeSvcPrefix = "node-service"
+ WorkerNode = "worker-node"
+ GatewayDVPrefix = "gateway-data-volume-pvc"
+ GatewayVMPrefix = "gateway-vm"
+ GatewaySvcPrefix = "gateway-service"
+ GatewayRack = "ssh-gateway-rack"
+ IgnitionScriptSecretPrefix = "ignition-script-secret"
+ DataDisk = "data-disk"
+
+ Boot = "boot"
+ Storage = "storage"
+ CPU = "cpu"
+ Memory = "memory"
+ Virtio = "virtio"
+ Native = "native"
+ None = "none"
+ Script = "script"
+ IgnitionDisk = "ignition"
+ Default = "default"
+ CloudInit = "cloud-init"
+ DataDiskSerial = "DATADISK"
+ IgnitionSerial = "IGNITION"
+
+ LBType = "LoadBalancer"
+ SSH = "ssh"
+ Port22 = 22
+
+ // Cassandra
+
+ CassandraInterNode = "cassandra-inter-node"
+ CassandraSSL = "cassandra-ssl"
+ CassandraCQL = "cassandra-cql"
+ CassandraJMX = "cassandra-jmx"
+ Port7000 = 7000
+ Port7001 = 7001
+ Port7199 = 7199
+ Port9042 = 9042
+
+ // Kafka
+
+ KafkaClient = "kafka-client"
+ KafkaControlPlane = "kafka-control-plane"
+ KafkaBroker = "kafka-broker"
+ Port9092 = 9092
+ Port9093 = 9093
+ Port9094 = 9094
+
+ // KafkaConnect
+
+ KafkaConnectAPI = "kafka-connect-API"
+ Port8083 = 8083
+
+ // Cadence
+
+ CadenceTChannel = "cadence-tchannel"
+ CadenceGRPC = "cadence-grpc"
+ CadenceWeb = "cadence-web"
+ Port7933 = 7933
+ Port7833 = 7833
+ Port8088 = 8088
+
+ // PostgreSQL
+
+ PostgreSQLDB = "postgresql-db"
+ Port5432 = 5432
+
+ // Redis
+
+ RedisDB = "redis-db"
+ RedisBus = "redis-bus"
+ Port6379 = 6379
+ Port16379 = 16379
+)
diff --git a/pkg/models/operator.go b/pkg/models/operator.go
index 3b46ad639..399d61c49 100644
--- a/pkg/models/operator.go
+++ b/pkg/models/operator.go
@@ -132,7 +132,7 @@ const (
CreationFailed = "CreationFailed"
FetchFailed = "FetchFailed"
GenerateFailed = "GenerateFailed"
- ConvertionFailed = "ConvertionFailed"
+ ConversionFailed = "ConversionFailed"
ValidationFailed = "ValidationFailed"
UpdateFailed = "UpdateFailed"
ExternalChanges = "ExternalChanges"
diff --git a/pkg/models/validation.go b/pkg/models/validation.go
index d8afd0257..410275d55 100644
--- a/pkg/models/validation.go
+++ b/pkg/models/validation.go
@@ -59,6 +59,8 @@ var (
DependencyVPCs = []string{"TARGET_VPC", "VPC_PEERED", "SEPARATE_VPC"}
EncryptionKeyAliasRegExp = "^[a-zA-Z0-9_-]{1}[a-zA-Z0-9 _-]*$"
OpenSearchBindingIDPattern = "[\\w-]+"
+ MemoryRegExp = "^\\d+(Ei|Pi|Ti|Gi|Mi|Ki)?$"
+ StorageRegExp = "^\\d+(Gi|Ti|Pi|Ei)?$"
CassandraReplicationFactors = []int{2, 3, 5}
KafkaReplicationFactors = []int{3, 5}
diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go
index 0d025b664..a8622ed43 100644
--- a/pkg/scheduler/scheduler.go
+++ b/pkg/scheduler/scheduler.go
@@ -28,9 +28,12 @@ var ClusterStatusInterval time.Duration
var ClusterBackupsInterval time.Duration
var UserCreationInterval time.Duration
-const StatusChecker = "statusChecker"
-const BackupsChecker = "backupsChecker"
-const UserCreator = "userCreator"
+const (
+ StatusChecker = "statusChecker"
+ BackupsChecker = "backupsChecker"
+ UserCreator = "userCreator"
+ OnPremisesIPsChecker = "onPremisesIPsChecker"
+)
type Job func() error
diff --git a/scripts/cloud-init-script-example.sh b/scripts/cloud-init-script-example.sh
new file mode 100644
index 000000000..96e5e757a
--- /dev/null
+++ b/scripts/cloud-init-script-example.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
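+# Example values: replace the password and SSH keys with your own.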
+export NEW_PASS="qwerty12345"
+export SSH_PUB_KEY=""
+export BOOTSTRAP_SSH_KEY="ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAEAQDgeaO3zkY5v1dww3fFONPzUnEgIqJ4kUK0Usu8iFdp+TWIulw9dDeQHa+PdWXP97l5Vv1mG9ipqShEIu7/2bp13KxSblWX4iV1MYZbtimhY3UDOsPn1G3E1Ipis6y+/tosDAm8LoWaGEMcLuE5UjP6gs6K57rCEjkVGjg7vjhIypAMC0J2N2+CxK9o/Y1+LZAec+FL5cmSJoajoo9y/VYJjz/a52jJ93wRafD2uu6ObAl5gkN/+gqY4IJFSMx20sPsIRXdbiBNDqiap56ibHHPKTeZhRbdXvZfjYkHtutXnSM2xn7BjnV8CguISxS3rXlzlzRVYwSUjnKUf5SKBbeyZbCokx271vCeUd5EXfHphvW6FIOna2AI5lpCSYkw5Kho3HaPi2NjXJ9i2dCr1zpcZpCiettDuaEjxR0Cl4Jd6PrAiAEZ0Ns0u2ysVhudshVzQrq6qdd7W9/MLjbDIHdTToNjFLZA6cbE0MQf18LXwJAl+G/QrXgcVaiopUmld+68JL89Xym55LzkMhI0NiDtWufawd/NiZ6jm13Z3+atvnOimdyuqBYeFWgbtcxjs0yN9v7h7PfPh6TbiQYFx37DCfQiIucqx1GWmMijmd7HMY6Nv3UvnoTUTSn4yz1NxhEpC61N+iAInZDpeJEtULSzlEMWlbzL4t5cF+Rm1dFdq3WpZt1mi8F4DgrsgZEuLGAw22RNW3++EWYFUNnJXaYyctPrMpWQktr4DB5nQGIHF92WR8uncxTNUXfWuT29O9e+bFYh1etmq8rsCoLcxN0zFHWvcECK55aE+47lfNAR+HEjuwYW10mGU/pFmO0F9FFmcQRSw4D4tnVUgl3XjKe3bBiTa4lUrzrKkLZ6n9/buW2e7c3jbjmXdPh2R+2Msr/vfuWs9glxQf+CYEbBW6Ye4pekIyI77SaB/bVhaHtXutKxm+QWdNle8aeqiA8Ji1Ml+s75vIg+n5v6viCnl5aV33xHRFpGQJzj2ktsXl9P9d5kgal9eXJYTywC2SnVbZVLb6FGN4kPZTVwX1f+u7v7JCm4YWlbQZtwwiXKjs99AVtQnBWqQvUH5sFUkVXlHA1Y9W6wlup0r+F6URL+7Yw+d0dHByfevrJg3pvmpLb3sEpjIAZodW3dIUReE7Ku3s/q/O9foFnfRBnCcZ2QsnxI5pqNrbrundD1ApOnNXEvICvPXHBBQ44cW0hzAO+WxY5VxyG8y/kXnb48G9efkIQFkNaITJrU9SiOk6bFP4QANdS/pmaSLjJIsHixa+7vmYjRy1SVoQ/39vDUnyCbqKtO56QMH32hQLRO3Vk7NVG6o4dYjFkiaMSaqVlHKMkJQHVzlK2PW9/fjVXfkAHmmhoD debian"
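+# Set the password for the debian and root users.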
+echo "debian:$NEW_PASS" | chpasswd
+echo "root:$NEW_PASS" | sudo chpasswd root
+sudo echo "$SSH_PUB_KEY" > /home/debian/.ssh/authorized_keys
+sudo echo "$BOOTSTRAP_SSH_KEY" >> /home/debian/.ssh/authorized_keys
+sudo chown -R debian: /home/debian/.ssh
+sudo cp /usr/share/doc/apt/examples/sources.list /etc/apt/sources.list
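+# Find the data disk by its DATADISK serial number and format it with ext4.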
+data_device=$(lsblk -dfn -o NAME,SERIAL | awk '$2 == "DATADISK" {print $1}')
+sudo mkfs -t ext4 /dev/"${data_device}"
\ No newline at end of file
diff --git a/scripts/cloud-init-secret.yaml b/scripts/cloud-init-secret.yaml
new file mode 100644
index 000000000..ef7ad5d0d
--- /dev/null
+++ b/scripts/cloud-init-secret.yaml
@@ -0,0 +1,6 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: instaclustr-cloud-init-secret
+data:
+ userdata: IyEvYmluL2Jhc2gKCmV4cG9ydCBORVdfUEFTUz0icXdlcnR5MTIzNDUiCmV4cG9ydCBTU0hfUFVCX0tFWT0ic3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCZ1FDOS9TVVFJd0o1OEZsNjdsTXNlNFlJdldUalc1eGR2d0xrQ1h2V0hjbnhnb1kvODRXbXBpVTU4WDcwbWxFWlRnQzBDNVFNNkhPZERmdUdzRThqb1VETHNCYTNpMFNRbkszM2dJeTZ0TVl4UmNtaE40RTJXNEpPakNrZWZvREQwMHdaYWlWTzRvRVl1bjluUlJ4MEc5SUNQSFhSak1uSzg2MG1uWTJXQlhReXp2RUxyamNISnFCajhmRTV4T0R0R1VMRi9mZXVzWWFVc2RNaFZLTTJsVkk5V1B0THlvRGFhVHhuZC9HZ0JoeStMaWl4cXJoRGhPb1NHOG5nM3lmTHZzMW5zbzcxV0Q0Si9GRFVtVm41V3V3Q2NrU1JZZUNCdVlXRHBHVWJ1OFdFdHJDdTZiVzJqSFRReVdJdjBYaU5nemttcUc2U3lTenJEeDRuUW9PSnRxYmFGVCtsTHptcDJaV0Q5c2tUZFdOU0FJUUxDUlBlSFVQUTllOURYWEZTZklROGQ4QWdpQlRvYmVlaVd4WFo0cXUrODVEYnIvWWdVTWkwUlp1OXVCMFl1R0N6cFlycWtQSFNPUXRpaHFVWFc4Q3pvQWNyOHI1SEVHUEVjRytnVzJ1K3NScE5XM21zTWVseEx0RHBySTNFd21odktRL2FXeWo5d0ZDcWdyTzlTSmM9IGRhbmlsQGRhbmlsLW1pbnQiCmV4cG9ydCBCT09UU1RSQVBfU1NIX0tFWT0ic3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFFQVFEZ2VhTzN6a1k1djFkd3czZkZPTlB6VW5FZ0lxSjRrVUswVXN1OGlGZHArVFdJdWx3OWREZVFIYStQZFdYUDk3bDVWdjFtRzlpcHFTaEVJdTcvMmJwMTNLeFNibFdYNGlWMU1ZWmJ0aW1oWTNVRE9zUG4xRzNFMUlwaXM2eSsvdG9zREFtOExvV2FHRU1jTHVFNVVqUDZnczZLNTdyQ0Vqa1ZHamc3dmpoSXlwQU1DMEoyTjIrQ3hLOW8vWTErTFpBZWMrRkw1Y21TSm9ham9vOXkvVllKanovYTUyako5M3dSYWZEMnV1Nk9iQWw1Z2tOLytncVk0SUpGU014MjBzUHNJUlhkYmlCTkRxaWFwNTZpYkhIUEtUZVpoUmJkWHZaZmpZa0h0dXRYblNNMnhuN0JqblY4Q2d1SVN4UzNyWGx6bHpSVll3U1VqbktVZjVTS0JiZXlaYkNva3gyNzF2Q2VVZDVFWGZIcGh2VzZGSU9uYTJBSTVscENTWWt3NUtobzNIYVBpMk5qWEo5aTJkQ3IxenBjWnBDaWV0dER1YUVqeFIwQ2w0SmQ2UHJBaUFFWjBOczB1MnlzVmh1ZHNoVnpRcnE2cWRkN1c5L01MamJESUhkVFRvTmpGTFpBNmNiRTBNUWYxOExYd0pBbCtHL1FyWGdjVmFpb3BVbWxkKzY4Skw4OVh5bTU1THprTWhJME5pRHRXdWZhd2QvTmlaNmptMTNaMythdHZuT2ltZHl1cUJZZUZXZ2J0Y3hqczB5Tjl2N2g3UGZQaDZUYmlRWUZ4MzdEQ2ZRaUl1Y3F4MUdXbU1pam1kN0hNWTZOdjNVdm5vVFVUU240eXoxTnhoRXBDNjFOK2lBSW5aRHBlSkV0VUxTemxFTVdsYnpMNHQ1Y0YrUm0xZEZkcTNXcFp0MW1pOEY0RGdyc2daRXVMR0F3MjJSTlczKytFV1lGVU5uSlhhWXljdFByTXBXUWt0cjREQjVuUUdJSEY5MldSOHVuY3hUTlVYZld1VDI5TzllK2JGWWgxZXRtcThyc0NvTGN4TjB6RkhXdmNFQ0s1NWFFKzQ3bGZOQVIrSEVqdXdZVzEwbUdVL3BGbU8wRjlGRm1jUVJTdzRENHRuVlVnbDNYaktlM2JCaVRhNGxVcnpyS2tMWjZuOS9idVcyZTdjM2piam1YZFBoMlIrMk1zci92ZnVXczlnbHhRZitDWUViQlc2WWU0cGVrSXlJNzdTYUIvYlZoYUh0WHV0S3htK1FXZE5sZThhZXFpQThKaTFNbCtzNzV2SWcrbjV2NnZpQ25sNWFWMzN4SFJGcEdRSnpqMmt0c1hsOVA5ZDVrZ2FsOWVYSllUeXdDMlNuVmJaVkxiNkZHTjRrUFpUVndYMWYrdTd2N0pDbTRZV2xiUVp0d3dpWEtqczk5QVZ0UW5CV3FRdlVINXNGVWtWWGxIQTFZOVc2d2x1cDByK0Y2VVJMKzdZdytkMGRIQnlmZXZySmczcHZtcExiM3NFcGpJQVpvZFczZElVUmVFN0t1M3MvcS9POWZvRm5mUkJuQ2NaMlFzbnhJNXBxTnJicnVuZEQxQXBPbk5YRXZJQ3ZQWEhCQlE0NGNXMGh6QU8rV3hZNVZ4eUc4eS9rWG5iNDhHOWVma0lRRmtOYUlUSnJVOVNpT2s2YkZQNFFBTmRTL3BtYVNMakpJc0hpeGErN3ZtWWpSeTFTVm9RLzM5dkRVbnlDYnFLdE81NlFNSDMyaFFMUk8zVms3TlZHNm80ZFlqRmtpYU1TYXFWbEhLTWtKUUhWemxLMlBXOS9malZYZmtBSG1taG9EIGRlYmlhbiIKZWNobyAiZGViaWFuOiRORVdfUEFTUyIgfCBjaHBhc3N3ZAplY2hvICJyb290OiRORVdfUEFTUyIgfCBzdWRvIGNocGFzc3dkIHJvb3QKc3VkbyBlY2hvICIkU1NIX1BVQl9LRVkiID4gL2hvbWUvZGViaWFuLy5zc2gvYXV0aG9yaXplZF9rZXlzCnN1ZG8gZWNobyAiJEJPT1RTVFJBUF9TU0hfS0VZIiA+PiAvaG9tZS9kZWJpYW4vLnNzaC9hdXRob3JpemVkX2tleXMKc3VkbyBjaG93biAtUiBkZWJpYW46IC9ob21lL2RlYmlhbi8uc3NoCnN1ZG8gY3AgL3Vzci9zaGFyZS9kb2MvYXB0L2V4YW1wbGVzL3NvdXJjZXMubGlzdCAvZXRjL2FwdC9zb3VyY2VzLmxpc3QKZGF0YV9kZXZpY2U9JChsc2JsayAtZGZuIC1vIE5BTUUsU0VSSUFMIHwgYXdrICckMiA9PSAiREFUQURJU0siIHtwcmludCAkMX0nKQpzdWRvIG1rZnMgLXQgZXh0NCAvZGV2LyIke2RhdGFfZGV2aWNlfSI=
\ No newline at end of file