diff --git a/.env.tmpl b/.env.tmpl index 1ec8ed5c3..a74932320 100644 --- a/.env.tmpl +++ b/.env.tmpl @@ -1,4 +1,6 @@ # For Instaclustr API USERNAME="" APIKEY="" -HOSTNAME="" \ No newline at end of file +HOSTNAME="" +ICADMIN_USERNAME="" +ICADMIN_APIKEY="" \ No newline at end of file diff --git a/apis/clusterresources/v1beta1/maintenanceevents_types.go b/apis/clusterresources/v1beta1/maintenanceevents_types.go index 2c59bd541..819552baa 100644 --- a/apis/clusterresources/v1beta1/maintenanceevents_types.go +++ b/apis/clusterresources/v1beta1/maintenanceevents_types.go @@ -51,9 +51,9 @@ type MaintenanceEventStatus struct { } type ClusteredMaintenanceEventStatus struct { - InProgress []*MaintenanceEventStatus `json:"inProgress"` - Past []*MaintenanceEventStatus `json:"past"` - Upcoming []*MaintenanceEventStatus `json:"upcoming"` + InProgress []*MaintenanceEventStatus `json:"inProgress,omitempty"` + Past []*MaintenanceEventStatus `json:"past,omitempty"` + Upcoming []*MaintenanceEventStatus `json:"upcoming,omitempty"` } //+kubebuilder:object:root=true diff --git a/apis/clusterresources/v1beta1/postgresqluser_types.go b/apis/clusterresources/v1beta1/postgresqluser_types.go index 4fa352c61..b0b0e1102 100644 --- a/apis/clusterresources/v1beta1/postgresqluser_types.go +++ b/apis/clusterresources/v1beta1/postgresqluser_types.go @@ -40,11 +40,6 @@ type ClusterInfo struct { Event string `json:"event,omitempty"` } -type NamespacedName struct { - Namespace string `json:"namespace"` - Name string `json:"name"` -} - //+kubebuilder:object:root=true //+kubebuilder:subresource:status diff --git a/apis/clusterresources/v1beta1/structs.go b/apis/clusterresources/v1beta1/structs.go index 0670d6da4..4377ee5c7 100644 --- a/apis/clusterresources/v1beta1/structs.go +++ b/apis/clusterresources/v1beta1/structs.go @@ -57,3 +57,8 @@ type SecretReference struct { Namespace string `json:"namespace"` Name string `json:"name"` } + +type NamespacedName struct { + Namespace string `json:"namespace"` + Name string `json:"name"` +} diff --git a/apis/clusters/v1beta1/cassandra_types.go b/apis/clusters/v1beta1/cassandra_types.go index 060a043fd..7f3ddc2f4 100644 --- a/apis/clusters/v1beta1/cassandra_types.go +++ b/apis/clusters/v1beta1/cassandra_types.go @@ -55,7 +55,8 @@ type CassandraRestoreFrom struct { // CassandraSpec defines the desired state of Cassandra type CassandraSpec struct { - RestoreFrom *CassandraRestoreFrom `json:"restoreFrom,omitempty"` + RestoreFrom *CassandraRestoreFrom `json:"restoreFrom,omitempty"` + OnPremisesSpec *CassandraOnPremisesSpec `json:"onPremisesSpec,omitempty"` Cluster `json:",inline"` DataCentres []*CassandraDataCentre `json:"dataCentres,omitempty"` LuceneEnabled bool `json:"luceneEnabled,omitempty"` @@ -67,6 +68,18 @@ type CassandraSpec struct { ResizeSettings []*ResizeSettings `json:"resizeSettings,omitempty"` } +type CassandraOnPremisesSpec struct { + StorageClassName string `json:"storageClassName"` + OSDiskSize string `json:"osDiskSize"` + DataDiskSize string `json:"dataDiskSize"` + SSHGatewayCPU int64 `json:"sshGatewayCPU,omitempty"` + SSHGatewayMemory string `json:"sshGatewayMemory,omitempty"` + NodeCPU int64 `json:"nodeCPU"` + NodeMemory string `json:"nodeMemory"` + OSImageURL string `json:"osImageURL"` + CloudInitScriptNamespacedName *NamespacedName `json:"cloudInitScriptNamespacedName"` +} + // CassandraStatus defines the observed state of Cassandra type CassandraStatus struct { ClusterStatus `json:",inline"` @@ -141,7 +154,7 @@ func (c *Cassandra) NewBackupSpec(startTimestamp int) 
*clusterresourcesv1beta1.C return &clusterresourcesv1beta1.ClusterBackup{ TypeMeta: ctrl.TypeMeta{ Kind: models.ClusterBackupKind, - APIVersion: models.ClusterresourcesV1beta1APIVersion, + APIVersion: models.ClusterResourcesV1beta1APIVersion, }, ObjectMeta: ctrl.ObjectMeta{ Name: models.SnapshotUploadPrefix + c.Status.ID + "-" + strconv.Itoa(startTimestamp), diff --git a/apis/clusters/v1beta1/cassandra_webhook.go b/apis/clusters/v1beta1/cassandra_webhook.go index 8195d30f6..3a648ed36 100644 --- a/apis/clusters/v1beta1/cassandra_webhook.go +++ b/apis/clusters/v1beta1/cassandra_webhook.go @@ -19,6 +19,7 @@ package v1beta1 import ( "context" "fmt" + "regexp" "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" @@ -87,6 +88,34 @@ func (cv *cassandraValidator) ValidateCreate(ctx context.Context, obj runtime.Ob return err } + if c.Spec.OnPremisesSpec != nil { + osDiskSizeMatched, err := regexp.Match(models.StorageRegExp, []byte(c.Spec.OnPremisesSpec.OSDiskSize)) + if !osDiskSizeMatched || err != nil { + return fmt.Errorf("disk size field for node OS must fit pattern: %s", + models.StorageRegExp) + } + + dataDiskSizeMatched, err := regexp.Match(models.StorageRegExp, []byte(c.Spec.OnPremisesSpec.DataDiskSize)) + if !dataDiskSizeMatched || err != nil { + return fmt.Errorf("disk size field for storing cluster data must fit pattern: %s", + models.StorageRegExp) + } + + nodeMemoryMatched, err := regexp.Match(models.MemoryRegExp, []byte(c.Spec.OnPremisesSpec.NodeMemory)) + if !nodeMemoryMatched || err != nil { + return fmt.Errorf("node memory field must fit pattern: %s", + models.MemoryRegExp) + } + + if c.Spec.PrivateNetworkCluster { + sshGatewayMemoryMatched, err := regexp.Match(models.MemoryRegExp, []byte(c.Spec.OnPremisesSpec.SSHGatewayMemory)) + if !sshGatewayMemoryMatched || err != nil { + return fmt.Errorf("ssh gateway memory field must fit pattern: %s", + models.MemoryRegExp) + } + } + } + if len(c.Spec.Spark) > 1 { return fmt.Errorf("spark should not have more than 1 item") } @@ -113,10 +142,22 @@ func (cv *cassandraValidator) ValidateCreate(ctx context.Context, obj runtime.Ob return fmt.Errorf("data centres field is empty") } + //TODO: add support of multiple DCs for OnPrem clusters + if len(c.Spec.DataCentres) > 1 && c.Spec.OnPremisesSpec != nil { + return fmt.Errorf("on-premises cluster can be provisioned with only one data centre") + } + for _, dc := range c.Spec.DataCentres { - err := dc.DataCentre.ValidateCreation() - if err != nil { - return err + if c.Spec.OnPremisesSpec != nil { + err := dc.DataCentre.ValidateOnPremisesCreation() + if err != nil { + return err + } + } else { + err := dc.DataCentre.ValidateCreation() + if err != nil { + return err + } } if !c.Spec.PrivateNetworkCluster && dc.PrivateIPBroadcastForDiscovery { diff --git a/apis/clusters/v1beta1/opensearch_types.go b/apis/clusters/v1beta1/opensearch_types.go index 84b9accad..b7b304076 100644 --- a/apis/clusters/v1beta1/opensearch_types.go +++ b/apis/clusters/v1beta1/opensearch_types.go @@ -539,7 +539,7 @@ func (os *OpenSearch) NewBackupSpec(startTimestamp int) *clusterresourcesv1beta1 return &clusterresourcesv1beta1.ClusterBackup{ TypeMeta: ctrl.TypeMeta{ Kind: models.ClusterBackupKind, - APIVersion: models.ClusterresourcesV1beta1APIVersion, + APIVersion: models.ClusterResourcesV1beta1APIVersion, }, ObjectMeta: ctrl.ObjectMeta{ Name: models.SnapshotUploadPrefix + os.Status.ID + "-" + strconv.Itoa(startTimestamp), diff --git a/apis/clusters/v1beta1/postgresql_types.go
b/apis/clusters/v1beta1/postgresql_types.go index a4b062809..af41e000d 100644 --- a/apis/clusters/v1beta1/postgresql_types.go +++ b/apis/clusters/v1beta1/postgresql_types.go @@ -150,7 +150,7 @@ func (pg *PostgreSQL) NewBackupSpec(startTimestamp int) *clusterresourcesv1beta1 return &clusterresourcesv1beta1.ClusterBackup{ TypeMeta: ctrl.TypeMeta{ Kind: models.ClusterBackupKind, - APIVersion: models.ClusterresourcesV1beta1APIVersion, + APIVersion: models.ClusterResourcesV1beta1APIVersion, }, ObjectMeta: ctrl.ObjectMeta{ Name: models.PgBackupPrefix + pg.Status.ID + "-" + strconv.Itoa(startTimestamp), diff --git a/apis/clusters/v1beta1/redis_types.go b/apis/clusters/v1beta1/redis_types.go index bd384d6b2..b6e25b951 100644 --- a/apis/clusters/v1beta1/redis_types.go +++ b/apis/clusters/v1beta1/redis_types.go @@ -132,7 +132,7 @@ func (r *Redis) NewBackupSpec(startTimestamp int) *clusterresourcesv1beta1.Clust return &clusterresourcesv1beta1.ClusterBackup{ TypeMeta: ctrl.TypeMeta{ Kind: models.ClusterBackupKind, - APIVersion: models.ClusterresourcesV1beta1APIVersion, + APIVersion: models.ClusterResourcesV1beta1APIVersion, }, ObjectMeta: ctrl.ObjectMeta{ Name: models.SnapshotUploadPrefix + r.Status.ID + "-" + strconv.Itoa(startTimestamp), diff --git a/apis/clusters/v1beta1/structs.go b/apis/clusters/v1beta1/structs.go index bffb39329..a8f811b6c 100644 --- a/apis/clusters/v1beta1/structs.go +++ b/apis/clusters/v1beta1/structs.go @@ -143,6 +143,61 @@ type privateLinkStatus struct { EndPointServiceName string `json:"endPointServiceName,omitempty"` } +type NamespacedName struct { + Namespace string `json:"namespace"` + Name string `json:"name"` +} + +type Gateway struct { + ID string `json:"id,omitempty"` + ClusterDataCentre string `json:"clusterDataCentre,omitempty"` + ClusterID string `json:"clusterId,omitempty"` + PublicAddress string `json:"publicAddress,omitempty"` + PrivateAddress string `json:"privateAddress,omitempty"` + NatID string `json:"natId,omitempty"` + NatPublicAddress string `json:"natPublicAddress,omitempty"` + NatPrivateAddress string `json:"natPrivateAddress,omitempty"` + NodeAgentVersion string `json:"nodeAgentVersion,omitempty"` + SSHMarkedForDeletion string `json:"sshMarkedForDeletion,omitempty"` + SSHReplaces string `json:"sshReplaces,omitempty"` + NatMarkedForDeletion string `json:"natMarkedForDeletion,omitempty"` + Rack string `json:"rack,omitempty"` + RackID string `json:"rackId,omitempty"` + SSHAWSID string `json:"sshAWSId,omitempty"` +} + +type OnPremiseNode struct { + ID string `json:"id,omitempty"` + ClusterDataCentre string `json:"clusterDataCentre,omitempty"` + AccountID string `json:"accountId,omitempty"` + Status string `json:"status,omitempty"` + PublicAddress string `json:"publicAddress,omitempty"` + PrivateAddress string `json:"privateAddress,omitempty"` + Provider string `json:"provider,omitempty"` + Size string `json:"size,omitempty"` + DeferredReason string `json:"deferredReason,omitempty"` + MarkedForDeletion string `json:"markedForDeletion,omitempty"` + NodeAgentStartDate string `json:"nodeAgentStartDate,omitempty"` + ChargifyDateLastBilled string `json:"chargifyDateLastBilled,omitempty"` + LastOSUpdate string `json:"lastOSUpdate,omitempty"` + Replaces string `json:"replaces,omitempty"` + Rack string `json:"rack,omitempty"` + RackID string `json:"rackId,omitempty"` + DataCentre string `json:"dataCentre,omitempty"` + ForceStart bool `json:"forceStart,omitempty"` + BundleStartEnabled bool `json:"bundleStartEnabled,omitempty"` + ClusterID string 
`json:"clusterId,omitempty"` + EphemeralStorageDiskCount int `json:"ephemeralStorageDiskCount,omitempty"` + PersistentStorageDiskCount int `json:"persistentStorageDiskCount,omitempty"` + CacheDiskQuota int `json:"cacheDiskQuota,omitempty"` + FailureReason string `json:"failureReason,omitempty"` + NodeAgentVersion string `json:"nodeAgentVersion,omitempty"` + OSVersionID string `json:"osVersionId,omitempty"` + OSBuildID string `json:"osBuildId,omitempty"` + DiskQuota int `json:"diskQuota,omitempty"` + InstanceStore bool `json:"instanceStore,omitempty"` +} + type PrivateLinkStatuses []*privateLinkStatus func (p1 PrivateLinkStatuses) Equal(p2 PrivateLinkStatuses) bool { diff --git a/apis/clusters/v1beta1/validation.go b/apis/clusters/v1beta1/validation.go index 6eb8e742f..35f510997 100644 --- a/apis/clusters/v1beta1/validation.go +++ b/apis/clusters/v1beta1/validation.go @@ -47,6 +47,25 @@ func (c *Cluster) ValidateCreation() error { return nil } +func (dc *DataCentre) ValidateOnPremisesCreation() error { + if dc.CloudProvider != models.ONPREMISES { + return fmt.Errorf("cloud provider %s is unavailable for data centre: %s, available value: %s", + dc.CloudProvider, dc.Name, models.ONPREMISES) + } + + if dc.Region != models.CLIENTDC { + return fmt.Errorf("region %s is unavailable for data centre: %s, available value: %s", + dc.Region, dc.Name, models.CLIENTDC) + } + + if !validation.Contains(dc.NodeSize, models.CassandraOnPremNodes) { + return fmt.Errorf("on-premises node size: %s is unavailable, available sizes: %v", + dc.NodeSize, models.CassandraOnPremNodes) + } + + return nil +} + func (dc *DataCentre) ValidateCreation() error { if !validation.Contains(dc.CloudProvider, models.CloudProviders) { return fmt.Errorf("cloud provider %s is unavailable for data centre: %s, available values: %v", diff --git a/apis/clusters/v1beta1/zz_generated.deepcopy.go b/apis/clusters/v1beta1/zz_generated.deepcopy.go index eb4beebbd..2f53f6749 100644 --- a/apis/clusters/v1beta1/zz_generated.deepcopy.go +++ b/apis/clusters/v1beta1/zz_generated.deepcopy.go @@ -423,6 +423,26 @@ func (in *CassandraList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CassandraOnPremisesSpec) DeepCopyInto(out *CassandraOnPremisesSpec) { + *out = *in + if in.CloudInitScriptNamespacedName != nil { + in, out := &in.CloudInitScriptNamespacedName, &out.CloudInitScriptNamespacedName + *out = new(NamespacedName) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CassandraOnPremisesSpec. +func (in *CassandraOnPremisesSpec) DeepCopy() *CassandraOnPremisesSpec { + if in == nil { + return nil + } + out := new(CassandraOnPremisesSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CassandraRestoreFrom) DeepCopyInto(out *CassandraRestoreFrom) { *out = *in @@ -457,6 +477,11 @@ func (in *CassandraSpec) DeepCopyInto(out *CassandraSpec) { *out = new(CassandraRestoreFrom) (*in).DeepCopyInto(*out) } + if in.OnPremisesSpec != nil { + in, out := &in.OnPremisesSpec, &out.OnPremisesSpec + *out = new(CassandraOnPremisesSpec) + (*in).DeepCopyInto(*out) + } in.Cluster.DeepCopyInto(&out.Cluster) if in.DataCentres != nil { in, out := &in.DataCentres, &out.DataCentres @@ -850,6 +875,21 @@ func (in *GCPConnectorSettings) DeepCopy() *GCPConnectorSettings { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Gateway) DeepCopyInto(out *Gateway) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Gateway. +func (in *Gateway) DeepCopy() *Gateway { + if in == nil { + return nil + } + out := new(Gateway) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *InterDataCentreReplication) DeepCopyInto(out *InterDataCentreReplication) { *out = *in @@ -1297,6 +1337,21 @@ func (in *ManagedCluster) DeepCopy() *ManagedCluster { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamespacedName) DeepCopyInto(out *NamespacedName) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespacedName. +func (in *NamespacedName) DeepCopy() *NamespacedName { + if in == nil { + return nil + } + out := new(NamespacedName) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Node) DeepCopyInto(out *Node) { *out = *in @@ -1317,6 +1372,21 @@ func (in *Node) DeepCopy() *Node { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OnPremiseNode) DeepCopyInto(out *OnPremiseNode) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnPremiseNode. +func (in *OnPremiseNode) DeepCopy() *OnPremiseNode { + if in == nil { + return nil + } + out := new(OnPremiseNode) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OpenSearch) DeepCopyInto(out *OpenSearch) { *out = *in diff --git a/config/crd/bases/clusters.instaclustr.com_cadences.yaml b/config/crd/bases/clusters.instaclustr.com_cadences.yaml index 430055b50..1fd0ba456 100644 --- a/config/crd/bases/clusters.instaclustr.com_cadences.yaml +++ b/config/crd/bases/clusters.instaclustr.com_cadences.yaml @@ -493,10 +493,6 @@ spec: - isFinalized type: object type: array - required: - - inProgress - - past - - upcoming type: object type: array options: diff --git a/config/crd/bases/clusters.instaclustr.com_cassandras.yaml b/config/crd/bases/clusters.instaclustr.com_cassandras.yaml index a28675bc9..8fda040e9 100644 --- a/config/crd/bases/clusters.instaclustr.com_cassandras.yaml +++ b/config/crd/bases/clusters.instaclustr.com_cassandras.yaml @@ -109,6 +109,45 @@ spec: name: description: Name [ 3 .. 32 ] characters. 
type: string + onPremisesSpec: + properties: + cloudInitScriptNamespacedName: + properties: + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + dataDiskSize: + type: string + nodeCPU: + format: int64 + type: integer + nodeMemory: + type: string + osDiskSize: + type: string + osImageURL: + type: string + sshGatewayCPU: + format: int64 + type: integer + sshGatewayMemory: + type: string + storageClassName: + type: string + required: + - cloudInitScriptNamespacedName + - dataDiskSize + - nodeCPU + - nodeMemory + - osDiskSize + - osImageURL + - storageClassName + type: object passwordAndUserAuth: type: boolean pciCompliance: @@ -415,10 +454,6 @@ spec: - isFinalized type: object type: array - required: - - inProgress - - past - - upcoming type: object type: array options: diff --git a/config/crd/bases/clusters.instaclustr.com_kafkaconnects.yaml b/config/crd/bases/clusters.instaclustr.com_kafkaconnects.yaml index fa695dab9..87209b6b0 100644 --- a/config/crd/bases/clusters.instaclustr.com_kafkaconnects.yaml +++ b/config/crd/bases/clusters.instaclustr.com_kafkaconnects.yaml @@ -451,10 +451,6 @@ spec: - isFinalized type: object type: array - required: - - inProgress - - past - - upcoming type: object type: array options: diff --git a/config/crd/bases/clusters.instaclustr.com_kafkas.yaml b/config/crd/bases/clusters.instaclustr.com_kafkas.yaml index fa81af863..a6e3de764 100644 --- a/config/crd/bases/clusters.instaclustr.com_kafkas.yaml +++ b/config/crd/bases/clusters.instaclustr.com_kafkas.yaml @@ -455,10 +455,6 @@ spec: - isFinalized type: object type: array - required: - - inProgress - - past - - upcoming type: object type: array options: diff --git a/config/crd/bases/clusters.instaclustr.com_opensearches.yaml b/config/crd/bases/clusters.instaclustr.com_opensearches.yaml index 628ea13f4..f5e0a111a 100644 --- a/config/crd/bases/clusters.instaclustr.com_opensearches.yaml +++ b/config/crd/bases/clusters.instaclustr.com_opensearches.yaml @@ -445,10 +445,6 @@ spec: - isFinalized type: object type: array - required: - - inProgress - - past - - upcoming type: object type: array options: diff --git a/config/crd/bases/clusters.instaclustr.com_postgresqls.yaml b/config/crd/bases/clusters.instaclustr.com_postgresqls.yaml index 86fbce8df..5ec93a122 100644 --- a/config/crd/bases/clusters.instaclustr.com_postgresqls.yaml +++ b/config/crd/bases/clusters.instaclustr.com_postgresqls.yaml @@ -421,10 +421,6 @@ spec: - isFinalized type: object type: array - required: - - inProgress - - past - - upcoming type: object type: array options: diff --git a/config/crd/bases/clusters.instaclustr.com_redis.yaml b/config/crd/bases/clusters.instaclustr.com_redis.yaml index f37da108c..af22bf018 100644 --- a/config/crd/bases/clusters.instaclustr.com_redis.yaml +++ b/config/crd/bases/clusters.instaclustr.com_redis.yaml @@ -409,10 +409,6 @@ spec: - isFinalized type: object type: array - required: - - inProgress - - past - - upcoming type: object type: array options: diff --git a/config/crd/bases/clusters.instaclustr.com_zookeepers.yaml b/config/crd/bases/clusters.instaclustr.com_zookeepers.yaml index 061a0bc49..412c538db 100644 --- a/config/crd/bases/clusters.instaclustr.com_zookeepers.yaml +++ b/config/crd/bases/clusters.instaclustr.com_zookeepers.yaml @@ -328,10 +328,6 @@ spec: - isFinalized type: object type: array - required: - - inProgress - - past - - upcoming type: object type: array options: diff --git a/config/default/manager_auth_proxy_patch.yaml 
b/config/default/manager_auth_proxy_patch.yaml index cda2a8418..e633f2176 100644 --- a/config/default/manager_auth_proxy_patch.yaml +++ b/config/default/manager_auth_proxy_patch.yaml @@ -49,6 +49,16 @@ spec: secretKeyRef: name: creds-secret key: HOSTNAME + - name: ICADMIN_USERNAME + valueFrom: + secretKeyRef: + name: creds-secret + key: ICADMIN_USERNAME + - name: ICADMIN_APIKEY + valueFrom: + secretKeyRef: + name: creds-secret + key: ICADMIN_APIKEY args: - "--health-probe-bind-address=:8081" - "--metrics-bind-address=127.0.0.1:8080" diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 26bf3de63..bb755a286 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -8,29 +8,42 @@ rules: - apiGroups: - "" resources: - - endpoints + - events verbs: - create - - delete + - patch +- apiGroups: + - "" + resources: + - nodes + verbs: - get - list - - patch - - update - watch - apiGroups: - "" resources: - - events + - persistentvolumeclaims verbs: - create + - delete + - deletecollection + - get + - list - patch + - update + - watch - apiGroups: - "" resources: - - nodes + - pods verbs: + - create + - delete - get - list + - patch + - update - watch - apiGroups: - "" @@ -39,6 +52,7 @@ rules: verbs: - create - delete + - deletecollection - get - list - patch @@ -51,6 +65,20 @@ rules: verbs: - create - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - cdi.kubevirt.io + resources: + - datavolumes + verbs: + - create + - delete + - deletecollection - get - list - patch @@ -770,3 +798,29 @@ rules: - get - patch - update +- apiGroups: + - kubevirt.io + resources: + - virtualmachineinstances + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - kubevirt.io + resources: + - virtualmachines + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch diff --git a/config/samples/clusters_v1beta1_cassandra.yaml b/config/samples/clusters_v1beta1_cassandra.yaml index d1e9be089..3b2f0c9fc 100644 --- a/config/samples/clusters_v1beta1_cassandra.yaml +++ b/config/samples/clusters_v1beta1_cassandra.yaml @@ -3,24 +3,48 @@ kind: Cassandra metadata: name: cassandra-cluster spec: - name: "username-Cassandra" + name: "danylo-Cassandra" version: "4.0.10" - privateNetworkCluster: false + privateNetworkCluster: true + onPremisesSpec: + storageClassName: managed-csi-premium + osDiskSize: 20Gi + dataDiskSize: 200Gi + sshGatewayCPU: 2 + sshGatewayMemory: 4096Mi + nodeCPU: 2 + nodeMemory: 8192Mi + osImageURL: "https://s3.amazonaws.com/debian-bucket/debian-11-generic-amd64-20230601-1398.raw" + cloudInitScriptNamespacedName: + namespace: default + name: instaclustr-cloud-init-secret dataCentres: - - name: "AWS_cassandra" - region: "US_WEST_2" - cloudProvider: "AWS_VPC" + - name: "onPremCassandra" + region: "CLIENT_DC" + cloudProvider: "ONPREMISES" continuousBackup: false - nodesNumber: 2 - replicationFactor: 2 - privateIpBroadcastForDiscovery: false - network: "172.16.0.0/19" + nodesNumber: 3 + replicationFactor: 3 + privateIpBroadcastForDiscovery: true + network: "192.168.0.0/16" tags: - "tag": "testTag" + "onprem": "test" clientToClusterEncryption: false + nodeSize: "CAS-PRD-OP.4.8-200" +# - name: "AWS_cassandra" +# region: "US_WEST_2" +# cloudProvider: "AWS_VPC" +# continuousBackup: false +# nodesNumber: 2 +# replicationFactor: 2 +# privateIpBroadcastForDiscovery: false +# network: "172.16.0.0/19" +# tags: +# "tag": "testTag" +# clientToClusterEncryption: false # 
cloudProviderSettings: # - customVirtualNetworkId: "vpc-0b69c781969e980a9" - nodeSize: "CAS-DEV-t4g.small-5" +# nodeSize: "CAS-DEV-t4g.small-5" # accountName: "InstaclustrRIYOA" # - name: "AWS_cassandra2" # region: "US_EAST_1" @@ -36,7 +60,7 @@ spec: # nodeSize: "CAS-DEV-t4g.small-30" pciCompliance: false luceneEnabled: false # can be enabled only on 3.11.13 version of Cassandra - passwordAndUserAuth: true + passwordAndUserAuth: false # userRefs: # - namespace: default # name: cassandrauser-sample diff --git a/controllers/clusterresources/awsendpointserviceprincipal_controller.go b/controllers/clusterresources/awsendpointserviceprincipal_controller.go index d7659ce11..314d75fe3 100644 --- a/controllers/clusterresources/awsendpointserviceprincipal_controller.go +++ b/controllers/clusterresources/awsendpointserviceprincipal_controller.go @@ -111,7 +111,7 @@ func (r *AWSEndpointServicePrincipalReconciler) handleCreate(ctx context.Context err = json.Unmarshal(b, &principal.Status) if err != nil { l.Error(err, "failed to parse an AWS endpoint service principal resource response from Instaclustr") - r.EventRecorder.Eventf(principal, models.Warning, models.ConvertionFailed, + r.EventRecorder.Eventf(principal, models.Warning, models.ConversionFailed, "Failed to parse an AWS endpoint service principal resource response from Instaclustr. Reason: %v", err, ) diff --git a/controllers/clusterresources/clusterbackup_controller.go b/controllers/clusterresources/clusterbackup_controller.go index cbd72f2c6..64f4863e1 100644 --- a/controllers/clusterresources/clusterbackup_controller.go +++ b/controllers/clusterresources/clusterbackup_controller.go @@ -170,7 +170,7 @@ func (r *ClusterBackupReconciler) Reconcile(ctx context.Context, req ctrl.Reques ) r.EventRecorder.Eventf( - backup, models.Warning, models.ConvertionFailed, + backup, models.Warning, models.ConversionFailed, "Start timestamp annotation convertion to int is failed. Reason: %v", err, ) diff --git a/controllers/clusterresources/postgresqluser_controller.go b/controllers/clusterresources/postgresqluser_controller.go index 601627e1d..a199783fd 100644 --- a/controllers/clusterresources/postgresqluser_controller.go +++ b/controllers/clusterresources/postgresqluser_controller.go @@ -409,7 +409,7 @@ func (r *PostgreSQLUserReconciler) createPostgreSQLFirewallRule( firewallRule := &clusterresourcesv1beta1.ClusterNetworkFirewallRule{ TypeMeta: ctrl.TypeMeta{ Kind: models.ClusterNetworkFirewallRuleKind, - APIVersion: models.ClusterresourcesV1beta1APIVersion, + APIVersion: models.ClusterResourcesV1beta1APIVersion, }, ObjectMeta: ctrl.ObjectMeta{ Name: firewallRuleName, diff --git a/controllers/clusters/cadence_controller.go b/controllers/clusters/cadence_controller.go index 5945367ec..1bd956396 100644 --- a/controllers/clusters/cadence_controller.go +++ b/controllers/clusters/cadence_controller.go @@ -149,7 +149,7 @@ func (r *CadenceReconciler) HandleCreateCluster( logger.Error(err, "Cannot convert Cadence cluster manifest to API spec", "cluster manifest", cadence.Spec) - r.EventRecorder.Eventf(cadence, models.Warning, models.ConvertionFailed, + r.EventRecorder.Eventf(cadence, models.Warning, models.ConversionFailed, "Cluster convertion from the Instaclustr API to k8s resource is failed. 
Reason: %v", err) return models.ReconcileRequeue @@ -269,7 +269,7 @@ func (r *CadenceReconciler) HandleUpdateCluster( "cluster ID", cadence.Status.ID, ) - r.EventRecorder.Eventf(cadence, models.Warning, models.ConvertionFailed, + r.EventRecorder.Eventf(cadence, models.Warning, models.ConversionFailed, "Cluster convertion from the Instaclustr API to k8s resource is failed. Reason: %v", err) return models.ReconcileRequeue diff --git a/controllers/clusters/cassandra_controller.go b/controllers/clusters/cassandra_controller.go index 02de8700c..529bd6590 100644 --- a/controllers/clusters/cassandra_controller.go +++ b/controllers/clusters/cassandra_controller.go @@ -19,13 +19,22 @@ package clusters import ( "context" "errors" + "fmt" "strconv" + "strings" "github.com/go-logr/logr" + k8scorev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/tools/record" + virtcorev1 "kubevirt.io/api/core/v1" + cdiv1beta1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" @@ -45,15 +54,12 @@ import ( "github.com/instaclustr/operator/pkg/scheduler" ) -const ( - StatusRUNNING = "RUNNING" -) - // CassandraReconciler reconciles a Cassandra object type CassandraReconciler struct { client.Client Scheme *runtime.Scheme API instaclustr.API + IcadminAPI instaclustr.IcadminAPI Scheduler scheduler.Interface EventRecorder record.EventRecorder } @@ -62,8 +68,13 @@ type CassandraReconciler struct { //+kubebuilder:rbac:groups=clusters.instaclustr.com,resources=cassandras/status,verbs=get;update;patch //+kubebuilder:rbac:groups=clusters.instaclustr.com,resources=cassandras/finalizers,verbs=update //+kubebuilder:rbac:groups="",resources=events,verbs=create;patch -//+kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups="",resources=endpoints,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=cdi.kubevirt.io,resources=datavolumes,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups=kubevirt.io,resources=virtualmachines,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups=kubevirt.io,resources=virtualmachineinstances,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch;delete;deletecollection // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state.
@@ -89,10 +101,16 @@ func (r *CassandraReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( switch cassandra.Annotations[models.ResourceStateAnnotation] { case models.CreatingEvent: + if cassandra.Spec.OnPremisesSpec != nil { + return r.handleCreateOnPremisesCluster(ctx, l, cassandra) + } return r.handleCreateCluster(ctx, l, cassandra) case models.UpdatingEvent: return r.handleUpdateCluster(ctx, l, cassandra) case models.DeletingEvent: + if cassandra.Spec.OnPremisesSpec != nil { + return r.handleDeleteOnPremisesCluster(ctx, l, cassandra) + } return r.handleDeleteCluster(ctx, l, cassandra) case models.GenericEvent: l.Info("Event isn't handled", @@ -229,13 +247,13 @@ func (r *CassandraReconciler) handleCreateCluster( l.Error(err, "Cannot start cluster status job", "cassandra cluster ID", cassandra.Status.ID) - r.EventRecorder.Eventf( - cassandra, models.Warning, models.CreationFailed, - "Cluster status check job is failed. Reason: %v", - err, - ) - return reconcile.Result{}, err - } + r.EventRecorder.Eventf( + cassandra, models.Warning, models.CreationFailed, + "Cluster status check job is failed. Reason: %v", + err, + ) + return reconcile.Result{}, err + } r.EventRecorder.Eventf( cassandra, models.Normal, models.Created, @@ -248,13 +266,13 @@ func (r *CassandraReconciler) handleCreateCluster( "cluster ID", cassandra.Status.ID, ) - r.EventRecorder.Eventf( - cassandra, models.Warning, models.CreationFailed, - "Cluster backups check job is failed. Reason: %v", - err, - ) - return reconcile.Result{}, err - } + r.EventRecorder.Eventf( + cassandra, models.Warning, models.CreationFailed, + "Cluster backups check job is failed. Reason: %v", + err, + ) + return reconcile.Result{}, err + } r.EventRecorder.Eventf( cassandra, models.Normal, models.Created, @@ -278,6 +296,168 @@ func (r *CassandraReconciler) handleCreateCluster( return models.ExitReconcile, nil } +func (r *CassandraReconciler) handleCreateOnPremisesCluster( + ctx context.Context, + l logr.Logger, + cassandra *v1beta1.Cassandra, +) (reconcile.Result, error) { + l = l.WithName("On-premises Cassandra creation event") + patch := cassandra.NewPatch() + if cassandra.Status.ID == "" { + l.Info( + "Creating on-premises cluster", + "cluster name", cassandra.Spec.Name, + "data centres", cassandra.Spec.DataCentres, + ) + + id, err := r.API.CreateCluster(instaclustr.CassandraEndpoint, cassandra.Spec.ToInstAPI()) + if err != nil { + l.Error( + err, "Cannot create cluster", + "cluster spec", cassandra.Spec, + ) + r.EventRecorder.Eventf( + cassandra, models.Warning, models.CreationFailed, + "Cluster creation on the Instaclustr is failed. Reason: %v", + err, + ) + return reconcile.Result{}, err + } + + r.EventRecorder.Eventf( + cassandra, models.Normal, models.Created, + "Cluster creation request is sent. Cluster ID: %s", + id, + ) + + cassandra.Status.ID = id + err = r.Status().Patch(ctx, cassandra, patch) + if err != nil { + l.Error(err, "Cannot patch cluster status", + "cluster name", cassandra.Spec.Name, + "cluster ID", cassandra.Status.ID, + "kind", cassandra.Kind, + "api Version", cassandra.APIVersion, + "namespace", cassandra.Namespace, + "cluster metadata", cassandra.ObjectMeta, + ) + r.EventRecorder.Eventf( + cassandra, models.Warning, models.PatchFailed, + "Cluster resource status patch is failed. 
Reason: %v", + err, + ) + return reconcile.Result{}, err + } + + cassandra.Annotations[models.ResourceStateAnnotation] = models.CreatingEvent + err = r.Patch(ctx, cassandra, patch) + if err != nil { + l.Error(err, "Cannot patch cluster", + "cluster name", cassandra.Spec.Name, + "cluster ID", cassandra.Status.ID, + "kind", cassandra.Kind, + "api Version", cassandra.APIVersion, + "namespace", cassandra.Namespace, + "cluster metadata", cassandra.ObjectMeta, + ) + r.EventRecorder.Eventf( + cassandra, models.Warning, models.PatchFailed, + "Cluster resource patch is failed. Reason: %v", + err, + ) + return reconcile.Result{}, err + } + + err = r.startClusterStatusJob(cassandra) + if err != nil { + l.Error(err, "Cannot start cluster status job", + "cassandra cluster ID", cassandra.Status.ID) + + r.EventRecorder.Eventf( + cassandra, models.Warning, models.CreationFailed, + "Cluster status check job is failed. Reason: %v", + err, + ) + return reconcile.Result{}, err + } + + r.EventRecorder.Eventf( + cassandra, models.Normal, models.Created, + "Cluster status check job is started", + ) + } + + if len(cassandra.Status.DataCentres) > 0 && cassandra.Status.State != models.RunningStatus { + err := r.reconcileOnPremResources(ctx, cassandra) + if err != nil { + l.Error( + err, "Cannot create resources for on-premises cluster", + "cluster spec", cassandra.Spec.OnPremisesSpec, + ) + r.EventRecorder.Eventf( + cassandra, models.Warning, models.CreationFailed, + "Resources creation for on-premises cluster is failed. Reason: %v", + err, + ) + return reconcile.Result{}, err + } + + l.Info( + "On-premises resources have been created", + "cluster name", cassandra.Spec.Name, + "on-premises Spec", cassandra.Spec.OnPremisesSpec, + "cluster ID", cassandra.Status.ID, + ) + + } else { + l.Info("Waiting for Data Centres provisioning...") + return models.ReconcileRequeue, nil + } + + controllerutil.AddFinalizer(cassandra, models.DeletionFinalizer) + cassandra.Annotations[models.ResourceStateAnnotation] = models.CreatedEvent + err := r.Patch(ctx, cassandra, patch) + if err != nil { + l.Error(err, "Cannot patch cluster", + "cluster name", cassandra.Spec.Name, + "cluster ID", cassandra.Status.ID, + "kind", cassandra.Kind, + "api Version", cassandra.APIVersion, + "namespace", cassandra.Namespace, + "cluster metadata", cassandra.ObjectMeta, + ) + r.EventRecorder.Eventf( + cassandra, models.Warning, models.PatchFailed, + "Cluster resource patch is failed. Reason: %v", + err, + ) + return reconcile.Result{}, err + } + + l.Info( + "Cluster has been created", + "cluster name", cassandra.Spec.Name, + "cluster ID", cassandra.Status.ID, + "kind", cassandra.Kind, + "api Version", cassandra.APIVersion, + "namespace", cassandra.Namespace, + ) + + err = r.startClusterOnPremisesIPsJob(cassandra) + if err != nil { + l.Error(err, "Cannot start cluster on-premises IPs job", + "cassandra cluster ID", cassandra.Status.ID) + + r.EventRecorder.Eventf( + cassandra, models.Warning, models.CreationFailed, + "Cluster on-premises IPs job is failed. Reason: %v", + err, + ) + return reconcile.Result{}, err + } + return models.ExitReconcile, nil +} + func (r *CassandraReconciler) handleUpdateCluster( ctx context.Context, l logr.Logger, @@ -309,7 +489,7 @@ func (r *CassandraReconciler) handleUpdateCluster( ) r.EventRecorder.Eventf( - cassandra, models.Warning, models.ConvertionFailed, + cassandra, models.Warning, models.ConversionFailed, "Cluster convertion from the Instaclustr API to k8s resource is failed. 
Reason: %v", err, ) @@ -623,6 +803,140 @@ func (r *CassandraReconciler) handleDeleteCluster( return models.ExitReconcile, nil } +func (r *CassandraReconciler) handleDeleteOnPremisesCluster( + ctx context.Context, + l logr.Logger, + c *v1beta1.Cassandra, +) (reconcile.Result, error) { + l = l.WithName("On-premises Cassandra deletion event") + + _, err := r.API.GetCassandra(c.Status.ID) + if err != nil && !errors.Is(err, instaclustr.NotFound) { + l.Error( + err, "Cannot get cluster from the Instaclustr API", + "cluster name", c.Spec.Name, + "cluster ID", c.Status.ID, + "kind", c.Kind, + "api Version", c.APIVersion, + "namespace", c.Namespace, + ) + r.EventRecorder.Eventf( + c, models.Warning, models.FetchFailed, + "Cluster fetch from the Instaclustr API is failed. Reason: %v", + err, + ) + return reconcile.Result{}, err + } + + patch := c.NewPatch() + + if !errors.Is(err, instaclustr.NotFound) { + l.Info("Sending cluster deletion to the Instaclustr API", + "cluster name", c.Spec.Name, + "cluster ID", c.Status.ID) + + err = r.API.DeleteCluster(c.Status.ID, instaclustr.CassandraEndpoint) + if err != nil { + l.Error(err, "Cannot delete cluster", + "cluster name", c.Spec.Name, + "state", c.Status.State, + "kind", c.Kind, + "api Version", c.APIVersion, + "namespace", c.Namespace, + ) + r.EventRecorder.Eventf( + c, models.Warning, models.DeletionFailed, + "Cluster deletion on the Instaclustr API is failed. Reason: %v", + err, + ) + return reconcile.Result{}, err + } + + r.EventRecorder.Event(c, models.Normal, models.DeletionStarted, + "Cluster deletion request is sent to the Instaclustr API.") + + if c.Spec.TwoFactorDelete != nil { + c.Annotations[models.ResourceStateAnnotation] = models.UpdatedEvent + c.Annotations[models.ClusterDeletionAnnotation] = models.Triggered + err = r.Patch(ctx, c, patch) + if err != nil { + l.Error(err, "Cannot patch cluster resource", + "cluster name", c.Spec.Name, + "cluster state", c.Status.State) + r.EventRecorder.Eventf(c, models.Warning, models.PatchFailed, + "Cluster resource patch is failed. Reason: %v", err) + + return reconcile.Result{}, err + } + + l.Info(msgDeleteClusterWithTwoFactorDelete, "cluster ID", c.Status.ID) + + r.EventRecorder.Event(c, models.Normal, models.DeletionStarted, + "Two-Factor Delete is enabled, please confirm cluster deletion via email or phone.") + + return reconcile.Result{}, err + } + } + + r.Scheduler.RemoveJob(c.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(c.GetJobID(scheduler.OnPremisesIPsChecker)) + + err = r.deleteOnPremResources(ctx, c) + if err != nil { + l.Error(err, "Cannot delete cluster on-premises resources", + "cluster ID", c.Status.ID, + ) + r.EventRecorder.Eventf( + c, models.Warning, models.DeletionFailed, + "Cluster on-premises resources deletion is failed. 
Reason: %v", + err, + ) + return reconcile.Result{}, err + } + + l.Info("Cluster on-premises resources are deleted", + "cluster ID", c.Status.ID, + ) + r.EventRecorder.Eventf( + c, models.Normal, models.Deleted, + "Cluster on-premises resources are deleted", + ) + + controllerutil.RemoveFinalizer(c, models.DeletionFinalizer) + c.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent + err = r.Patch(ctx, c, patch) + if err != nil { + l.Error(err, "Cannot patch cluster resource", + "cluster name", c.Spec.Name, + "cluster ID", c.Status.ID, + "kind", c.Kind, + "api Version", c.APIVersion, + "namespace", c.Namespace, + "cluster metadata", c.ObjectMeta, + ) + + r.EventRecorder.Eventf( + c, models.Warning, models.PatchFailed, + "Cluster resource patch is failed. Reason: %v", + err, + ) + return reconcile.Result{}, err + } + + l.Info("Cluster has been deleted", + "cluster name", c.Spec.Name, + "cluster ID", c.Status.ID, + "kind", c.Kind, + "api Version", c.APIVersion) + + r.EventRecorder.Eventf( + c, models.Normal, models.Deleted, + "Cluster resource is deleted", + ) + + return models.ExitReconcile, nil +} + func (r *CassandraReconciler) handleUsersCreate( ctx context.Context, l logr.Logger, @@ -877,25 +1191,260 @@ func (r *CassandraReconciler) startUsersCreationJob(cluster *v1beta1.Cassandra) return nil } -func (r *CassandraReconciler) newWatchStatusJob(cassandra *v1beta1.Cassandra) scheduler.Job { - l := log.Log.WithValues("component", "CassandraStatusClusterJob") - return func() error { - namespacedName := client.ObjectKeyFromObject(cassandra) - err := r.Get(context.Background(), namespacedName, cassandra) - if k8serrors.IsNotFound(err) { - l.Info("Resource is not found in the k8s cluster. Closing Instaclustr status sync.", - "namespaced name", namespacedName) - r.Scheduler.RemoveJob(cassandra.GetJobID(scheduler.BackupsChecker)) - r.Scheduler.RemoveJob(cassandra.GetJobID(scheduler.UserCreator)) - r.Scheduler.RemoveJob(cassandra.GetJobID(scheduler.StatusChecker)) - return nil - } +func (r *CassandraReconciler) startClusterOnPremisesIPsJob(cluster *v1beta1.Cassandra) error { + job := r.newWatchOnPremisesIPsJob(cluster) - iData, err := r.API.GetCassandra(cassandra.Status.ID) - if err != nil { - if errors.Is(err, instaclustr.NotFound) { - return r.handleExternalDelete(context.Background(), cassandra) - } + err := r.Scheduler.ScheduleJob(cluster.GetJobID(scheduler.OnPremisesIPsChecker), scheduler.ClusterStatusInterval, job) + if err != nil { + return err + } + + return nil +} + +func (r *CassandraReconciler) newWatchOnPremisesIPsJob(c *v1beta1.Cassandra) scheduler.Job { + l := log.Log.WithValues("component", "cassandraOnPremStatusClusterJob") + + return func() error { + gateways, err := r.IcadminAPI.GetGateways(c.Status.DataCentres[0].ID) + if err != nil { + l.Error(err, "Cannot get Cassandra SSH-gateway nodes from the Instaclustr API", + "cluster name", c.Spec.Name, + "status", c.Status) + r.EventRecorder.Eventf( + c, models.Warning, models.FetchFailed, + "SSH-gateway nodes fetch from the Instaclustr API is failed.
Reason: %v", + err, + ) + return err + } + + for _, gateway := range gateways { + gatewayPods := &k8scorev1.PodList{} + err = r.List(context.Background(), gatewayPods, &client.ListOptions{ + LabelSelector: labels.SelectorFromSet(map[string]string{ + models.ClusterIDLabel: c.Status.ID, + models.NodeIDLabel: gateway.ID, + }), + Namespace: c.Namespace, + }) + if err != nil { + l.Error(err, "Cannot list SSH-gateway pods", + "cluster name", c.Spec.Name, + "clusterID", c.Status.ID, + ) + + r.EventRecorder.Eventf( + c, models.Warning, models.CreationFailed, + "Fetching SSH-gateways is failed. Reason: %v", + err, + ) + return err + } + + for _, pod := range gatewayPods.Items { + if (pod.Status.PodIP != "" && gateway.PrivateAddress == "") || + (pod.Status.PodIP != "" && pod.Status.PodIP != gateway.PrivateAddress) { + err = r.IcadminAPI.SetPrivateGatewayIP(c.Status.DataCentres[0].ID, pod.Status.PodIP) + if err != nil { + l.Error(err, "Cannot set Private IP for the SSH-gateway node", + "cluster name", c.Spec.Name, + "clusterID", c.Status.ID, + ) + + r.EventRecorder.Eventf( + c, models.Warning, models.CreationFailed, + "Setting Private IP for the SSH-gateway node is failed. Reason: %v", + err, + ) + return err + } + } + } + + gatewaySVCs := &k8scorev1.ServiceList{} + err = r.List(context.Background(), gatewaySVCs, &client.ListOptions{ + LabelSelector: labels.SelectorFromSet(map[string]string{ + models.ClusterIDLabel: c.Status.ID, + models.NodeIDLabel: gateway.ID, + }), + Namespace: c.Namespace, + }) + if err != nil { + l.Error(err, "Cannot get services backed by SSH-gateway pods", + "cluster name", c.Spec.Name, + "clusterID", c.Status.ID, + ) + + r.EventRecorder.Eventf( + c, models.Warning, models.CreationFailed, + "Fetching services backed by SSH-gateway pods is failed. Reason: %v", + err, + ) + return err + } + + for _, svc := range gatewaySVCs.Items { + if (svc.Status.LoadBalancer.Ingress[0].IP != "" && gateway.PublicAddress == "") || + (svc.Status.LoadBalancer.Ingress[0].IP != gateway.PublicAddress) { + err = r.IcadminAPI.SetPublicGatewayIP(c.Status.DataCentres[0].ID, svc.Status.LoadBalancer.Ingress[0].IP) + if err != nil { + l.Error(err, "Cannot set Public IP for the SSH-gateway node", + "cluster name", c.Spec.Name, + "clusterID", c.Status.ID, + ) + + r.EventRecorder.Eventf( + c, models.Warning, models.CreationFailed, + "Setting Public IP for the SSH-gateway node is failed. Reason: %v", + err, + ) + return err + } + + l.Info("IPs for on-premises cluster ssh-gateway are set", + "cluster name", c.Spec.Name, + "clusterID", c.Status.ID, + ) + + r.EventRecorder.Eventf( + c, models.Normal, models.Created, + "SSH-gateway IPs are set", + ) + } + } + } + + nodes, err := r.IcadminAPI.GetOnPremisesNodes(c.Status.ID) + if err != nil { + l.Error(err, "Cannot get Cassandra on-premises nodes from the Instaclustr API", + "cluster name", c.Spec.Name, + "status", c.Status) + r.EventRecorder.Eventf( + c, models.Warning, models.FetchFailed, + "On-premises nodes fetch from the Instaclustr API is failed. 
Reason: %v", + err, + ) + return err + } + + for _, node := range nodes { + request := &v1beta1.OnPremiseNode{} + + nodePods := &k8scorev1.PodList{} + err = r.List(context.Background(), nodePods, &client.ListOptions{ + LabelSelector: labels.SelectorFromSet(map[string]string{ + models.ClusterIDLabel: c.Status.ID, + models.NodeIDLabel: node.ID, + }), + Namespace: c.Namespace, + }) + if err != nil { + l.Error(err, "Cannot get on-premises cluster pods", + "cluster name", c.Spec.Name, + "clusterID", c.Status.ID, + ) + + r.EventRecorder.Eventf( + c, models.Warning, models.CreationFailed, + "Fetching on-premises cluster pods is failed. Reason: %v", + err, + ) + return err + } + + nodeSVCs := &k8scorev1.ServiceList{} + err = r.List(context.Background(), nodeSVCs, &client.ListOptions{ + LabelSelector: labels.SelectorFromSet(map[string]string{ + models.ClusterIDLabel: c.Status.ID, + models.NodeIDLabel: node.ID, + }), + Namespace: c.Namespace, + }) + if err != nil { + l.Error(err, "Cannot get services backed by on-premises cluster pods", + "cluster name", c.Spec.Name, + "clusterID", c.Status.ID, + ) + + r.EventRecorder.Eventf( + c, models.Warning, models.CreationFailed, + "Fetching services backed by on-premises cluster pods is failed. Reason: %v", + err, + ) + return err + } + + for _, pod := range nodePods.Items { + if (pod.Status.PodIP != "" && node.PrivateAddress == "") || + (pod.Status.PodIP != "" && pod.Status.PodIP != node.PrivateAddress) { + request.PrivateAddress = pod.Status.PodIP + } + } + + for _, svc := range nodeSVCs.Items { + if (svc.Status.LoadBalancer.Ingress[0].IP != "" && node.PublicAddress == "") || + (svc.Status.LoadBalancer.Ingress[0].IP != node.PublicAddress) { + request.PublicAddress = svc.Status.LoadBalancer.Ingress[0].IP + } + } + + if request.PublicAddress != "" || request.PrivateAddress != "" { + err = r.IcadminAPI.SetNodeIPs(node.ID, request) + if err != nil { + l.Error(err, "Cannot set IPs for on-premises cluster nodes", + "cluster name", c.Spec.Name, + "clusterID", c.Status.ID, + ) + + r.EventRecorder.Eventf( + c, models.Warning, models.CreationFailed, + "Setting IPs for on-premises cluster nodes is failed. Reason: %v", + err, + ) + return err + } + + l.Info("IPs for on-premises cluster node are set", + "cluster name", c.Spec.Name, + "clusterID", c.Status.ID, + ) + r.EventRecorder.Eventf( + c, models.Normal, models.Created, + "Nodes IPs are set", + ) + } + } + return nil + } +} + +func (r *CassandraReconciler) newWatchStatusJob(cassandra *v1beta1.Cassandra) scheduler.Job { + l := log.Log.WithValues("component", "CassandraStatusClusterJob") + return func() error { + namespacedName := client.ObjectKeyFromObject(cassandra) + err := r.Get(context.Background(), namespacedName, cassandra) + if k8serrors.IsNotFound(err) { + l.Info("Resource is not found in the k8s cluster.
Closing Instaclustr status sync.", + "namespaced name", namespacedName) + + if cassandra.Spec.OnPremisesSpec != nil { + r.Scheduler.RemoveJob(cassandra.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(cassandra.GetJobID(scheduler.OnPremisesIPsChecker)) + return nil + } + + r.Scheduler.RemoveJob(cassandra.GetJobID(scheduler.BackupsChecker)) + r.Scheduler.RemoveJob(cassandra.GetJobID(scheduler.UserCreator)) + r.Scheduler.RemoveJob(cassandra.GetJobID(scheduler.StatusChecker)) + return nil + } + + iData, err := r.API.GetCassandra(cassandra.Status.ID) + if err != nil { + if errors.Is(err, instaclustr.NotFound) { + return r.handleExternalDelete(context.Background(), cassandra) + } l.Error(err, "Cannot get cluster from the Instaclustr API", "clusterID", cassandra.Status.ID) @@ -925,7 +1474,7 @@ func (r *CassandraReconciler) newWatchStatusJob(cassandra *v1beta1.Cassandra) sc return err } - if !areDCsEqual { + if !areDCsEqual && cassandra.Spec.OnPremisesSpec == nil { var nodes []*v1beta1.Node for _, dc := range iCassandra.Status.ClusterStatus.DataCentres { @@ -1233,6 +1782,715 @@ func (r *CassandraReconciler) reconcileMaintenanceEvents(ctx context.Context, c return nil } +func (r *CassandraReconciler) reconcileOnPremResources( + ctx context.Context, + c *v1beta1.Cassandra, +) error { + if c.Spec.PrivateNetworkCluster { + err := r.reconcileSSHGatewayResources(ctx, c) + if err != nil { + return err + } + } + + err := r.reconcileNodesResources(ctx, c) + if err != nil { + return err + } + + return nil +} + +func (r *CassandraReconciler) reconcileSSHGatewayResources( + ctx context.Context, + c *v1beta1.Cassandra, +) error { + gateways, err := r.IcadminAPI.GetGateways(c.Status.DataCentres[0].ID) + if err != nil { + return err + } + + for i, gateway := range gateways { + gatewayDVSize, err := resource.ParseQuantity(c.Spec.OnPremisesSpec.OSDiskSize) + if err != nil { + return err + } + + gatewayDVName := fmt.Sprintf("%s-%d-%s", models.GatewayDVPrefix, i, strings.ToLower(c.Spec.Name)) + gatewayDV, err := r.createDV(ctx, c, gatewayDVName, gateway.ID, gatewayDVSize, true) + if err != nil { + return err + } + + gatewayCPU := resource.Quantity{} + gatewayCPU.Set(c.Spec.OnPremisesSpec.SSHGatewayCPU) + + gatewayMemory, err := resource.ParseQuantity(c.Spec.OnPremisesSpec.SSHGatewayMemory) + if err != nil { + return err + } + + gatewayName := fmt.Sprintf("%s-%d-%s", models.GatewayVMPrefix, i, strings.ToLower(c.Spec.Name)) + + secretName, err := r.reconcileIgnitionScriptSecret(ctx, c, gatewayName, gateway.ID, gateway.Rack) + if err != nil { + return err + } + + gatewayVM := &virtcorev1.VirtualMachine{} + err = r.Get(ctx, types.NamespacedName{ + Namespace: c.Namespace, + Name: gatewayName, + }, gatewayVM) + if client.IgnoreNotFound(err) != nil { + return err + } + if k8serrors.IsNotFound(err) { + gatewayVM, err = r.newVM( + ctx, + c, + gatewayName, + gateway.ID, + gateway.Rack, + gatewayDV.Name, + secretName, + gatewayCPU, + gatewayMemory) + if err != nil { + return err + } + err = r.Client.Create(ctx, gatewayVM) + if err != nil { + return err + } + } + + gatewaySvcName := fmt.Sprintf("%s-%s", models.GatewaySvcPrefix, gatewayName) + gatewayExposeService := &k8scorev1.Service{} + err = r.Get(ctx, types.NamespacedName{ + Namespace: c.Namespace, + Name: gatewaySvcName, + }, gatewayExposeService) + + if client.IgnoreNotFound(err) != nil { + return err + } + if k8serrors.IsNotFound(err) { + gatewayExposeService = r.newExposeService(c, gatewaySvcName, gatewayName, gateway.ID) + err = r.Client.Create(ctx, 
gatewayExposeService) + if err != nil { + return err + } + } + } + + return nil +} + +func (r *CassandraReconciler) reconcileNodesResources( + ctx context.Context, + c *v1beta1.Cassandra, +) error { + nodes, err := r.IcadminAPI.GetOnPremisesNodes(c.Status.ID) + if err != nil { + return err + } + + for i, node := range nodes { + nodeOSDiskSize, err := resource.ParseQuantity(c.Spec.OnPremisesSpec.OSDiskSize) + if err != nil { + return err + } + + nodeOSDiskDVName := fmt.Sprintf("%s-%d-%s", models.NodeOSDVPrefix, i, strings.ToLower(c.Name)) + nodeOSDV, err := r.createDV(ctx, c, nodeOSDiskDVName, node.ID, nodeOSDiskSize, true) + if err != nil { + return err + } + + nodeDataDiskDVSize, err := resource.ParseQuantity(c.Spec.OnPremisesSpec.DataDiskSize) + if err != nil { + return err + } + + nodeDataDiskDVName := fmt.Sprintf("%s-%d-%s", models.NodeDVPrefix, i, strings.ToLower(c.Name)) + nodeDataDV, err := r.createDV(ctx, c, nodeDataDiskDVName, node.ID, nodeDataDiskDVSize, false) + if err != nil { + return err + } + + nodeCPU := resource.Quantity{} + nodeCPU.Set(c.Spec.OnPremisesSpec.NodeCPU) + + nodeMemory, err := resource.ParseQuantity(c.Spec.OnPremisesSpec.NodeMemory) + if err != nil { + return err + } + + nodeName := fmt.Sprintf("%s-%d-%s", models.NodeVMPrefix, i, strings.ToLower(c.Name)) + + secretName, err := r.reconcileIgnitionScriptSecret(ctx, c, nodeName, node.ID, node.Rack) + if err != nil { + return err + } + + nodeVM := &virtcorev1.VirtualMachine{} + err = r.Get(ctx, types.NamespacedName{ + Namespace: c.Namespace, + Name: nodeName, + }, nodeVM) + if client.IgnoreNotFound(err) != nil { + return err + } + if k8serrors.IsNotFound(err) { + nodeVM, err = r.newVM( + ctx, + c, + nodeName, + node.ID, + node.Rack, + nodeOSDV.Name, + secretName, + nodeCPU, + nodeMemory, + nodeDataDV.Name) + if err != nil { + return err + } + err = r.Client.Create(ctx, nodeVM) + if err != nil { + return err + } + } + + if !c.Spec.PrivateNetworkCluster { + nodeExposeName := fmt.Sprintf("%s-%s", models.NodeSvcPrefix, nodeName) + nodeExposeService := &k8scorev1.Service{} + err = r.Get(ctx, types.NamespacedName{ + Namespace: c.Namespace, + Name: nodeExposeName, + }, nodeExposeService) + if client.IgnoreNotFound(err) != nil { + return err + } + if k8serrors.IsNotFound(err) { + nodeExposeService = r.newExposeService(c, nodeExposeName, nodeName, node.ID) + err = r.Client.Create(ctx, nodeExposeService) + if err != nil { + return err + } + } + } + } + return nil +} + +func (r *CassandraReconciler) createDV( + ctx context.Context, + c *v1beta1.Cassandra, + name, + nodeID string, + size resource.Quantity, + isOSDisk bool, +) (*cdiv1beta1.DataVolume, error) { + dv := &cdiv1beta1.DataVolume{} + pvc := &k8scorev1.PersistentVolumeClaim{} + err := r.Get(ctx, types.NamespacedName{ + Namespace: c.Namespace, + Name: name, + }, pvc) + if client.IgnoreNotFound(err) != nil { + return nil, err + } + if k8serrors.IsNotFound(err) { + err = r.Get(ctx, types.NamespacedName{ + Namespace: c.Namespace, + Name: name, + }, dv) + if client.IgnoreNotFound(err) != nil { + return nil, err + } + if k8serrors.IsNotFound(err) { + if isOSDisk { + dv = r.newOSDiskDV(c, name, nodeID, size) + } else { + dv = r.newDataDiskDV(c, name, nodeID, size) + } + err = r.Client.Create(ctx, dv) + if err != nil { + return nil, err + } + } + } + return dv, nil +} + +func (r *CassandraReconciler) newOSDiskDV( + c *v1beta1.Cassandra, + name, + nodeID string, + size resource.Quantity, +) *cdiv1beta1.DataVolume { + return &cdiv1beta1.DataVolume{ + TypeMeta: metav1.TypeMeta{ 
+ Kind: models.DVKind, + APIVersion: models.CDIKubevirtV1beta1APIVersion, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: c.Namespace, + Labels: map[string]string{ + models.ClusterIDLabel: c.Status.ID, + models.NodeIDLabel: nodeID, + }, + Finalizers: []string{models.DeletionFinalizer}, + }, + Spec: cdiv1beta1.DataVolumeSpec{ + Source: &cdiv1beta1.DataVolumeSource{ + HTTP: &cdiv1beta1.DataVolumeSourceHTTP{ + URL: c.Spec.OnPremisesSpec.OSImageURL, + }, + }, + PVC: &k8scorev1.PersistentVolumeClaimSpec{ + AccessModes: []k8scorev1.PersistentVolumeAccessMode{ + k8scorev1.ReadWriteOnce, + }, + Resources: k8scorev1.ResourceRequirements{ + Requests: k8scorev1.ResourceList{ + models.Storage: size, + }, + }, + StorageClassName: &c.Spec.OnPremisesSpec.StorageClassName, + }, + }, + } +} + +func (r *CassandraReconciler) newDataDiskDV( + c *v1beta1.Cassandra, + name, + nodeID string, + size resource.Quantity, +) *cdiv1beta1.DataVolume { + return &cdiv1beta1.DataVolume{ + TypeMeta: metav1.TypeMeta{ + Kind: models.DVKind, + APIVersion: models.CDIKubevirtV1beta1APIVersion, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: c.Namespace, + Labels: map[string]string{ + models.ClusterIDLabel: c.Status.ID, + models.NodeIDLabel: nodeID, + }, + Finalizers: []string{models.DeletionFinalizer}, + }, + Spec: cdiv1beta1.DataVolumeSpec{ + Source: &cdiv1beta1.DataVolumeSource{ + Blank: &cdiv1beta1.DataVolumeBlankImage{}, + }, + PVC: &k8scorev1.PersistentVolumeClaimSpec{ + AccessModes: []k8scorev1.PersistentVolumeAccessMode{ + k8scorev1.ReadWriteOnce, + }, + Resources: k8scorev1.ResourceRequirements{ + Requests: k8scorev1.ResourceList{ + models.Storage: size, + }, + }, + StorageClassName: &c.Spec.OnPremisesSpec.StorageClassName, + }, + }, + } +} + +func (r *CassandraReconciler) newExposeService( + c *v1beta1.Cassandra, + name, + vmName, + nodeID string, +) *k8scorev1.Service { + return &k8scorev1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: models.ServiceKind, + APIVersion: models.K8sAPIVersionV1, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: c.Namespace, + Labels: map[string]string{ + models.ClusterIDLabel: c.Status.ID, + models.NodeIDLabel: nodeID, + }, + Finalizers: []string{models.DeletionFinalizer}, + }, + Spec: k8scorev1.ServiceSpec{ + Ports: []k8scorev1.ServicePort{ + { + Port: models.Port22, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: models.Port22, + }, + }, + }, + Selector: map[string]string{ + models.KubevirtDomainLabel: vmName, + models.NodeIDLabel: nodeID, + }, + Type: models.LBType, + }, + } +} + +func (r *CassandraReconciler) newVM( + ctx context.Context, + c *v1beta1.Cassandra, + vmName, + nodeID, + nodeRack, + OSDiskDVName, + ignitionSecretName string, + cpu, + memory resource.Quantity, + storageDVNames ...string, +) (*virtcorev1.VirtualMachine, error) { + running := true + bootOrder1 := uint(1) + + cloudInitSecret := &k8scorev1.Secret{} + err := r.Get(ctx, types.NamespacedName{ + Namespace: c.Spec.OnPremisesSpec.CloudInitScriptNamespacedName.Namespace, + Name: c.Spec.OnPremisesSpec.CloudInitScriptNamespacedName.Name, + }, cloudInitSecret) + if err != nil { + return nil, err + } + + vm := &virtcorev1.VirtualMachine{ + TypeMeta: metav1.TypeMeta{ + Kind: models.VirtualMachineKind, + APIVersion: models.KubevirtV1APIVersion, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: vmName, + Namespace: c.Namespace, + Labels: map[string]string{ + models.ClusterIDLabel: c.Status.ID, + models.NodeIDLabel: nodeID, + models.NodeRackLabel: nodeRack, + 
models.KubevirtDomainLabel: vmName, + }, + //Finalizers: []string{models.DeletionFinalizer}, + }, + Spec: virtcorev1.VirtualMachineSpec{ + Running: &running, + Template: &virtcorev1.VirtualMachineInstanceTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + models.ClusterIDLabel: c.Status.ID, + models.NodeIDLabel: nodeID, + models.NodeRackLabel: nodeRack, + models.KubevirtDomainLabel: vmName, + }, + Finalizers: []string{models.DeletionFinalizer}, + }, + Spec: virtcorev1.VirtualMachineInstanceSpec{ + Domain: virtcorev1.DomainSpec{ + Resources: virtcorev1.ResourceRequirements{ + Requests: k8scorev1.ResourceList{ + models.CPU: cpu, + models.Memory: memory, + }, + }, + Devices: virtcorev1.Devices{ + Disks: []virtcorev1.Disk{ + { + Name: models.Boot, + BootOrder: &bootOrder1, + IO: models.Native, + Cache: models.None, + DiskDevice: virtcorev1.DiskDevice{ + Disk: &virtcorev1.DiskTarget{ + Bus: models.Virtio, + }, + }, + }, + { + Name: models.CloudInit, + DiskDevice: virtcorev1.DiskDevice{}, + Cache: models.None, + }, + { + Name: models.IgnitionDisk, + DiskDevice: virtcorev1.DiskDevice{}, + Serial: models.IgnitionSerial, + Cache: models.None, + }, + }, + Interfaces: []virtcorev1.Interface{ + { + Name: models.Default, + InterfaceBindingMethod: virtcorev1.InterfaceBindingMethod{ + Bridge: &virtcorev1.InterfaceBridge{}, + }, + }, + }, + }, + }, + Volumes: []virtcorev1.Volume{ + { + Name: models.Boot, + VolumeSource: virtcorev1.VolumeSource{ + PersistentVolumeClaim: &virtcorev1.PersistentVolumeClaimVolumeSource{ + PersistentVolumeClaimVolumeSource: k8scorev1.PersistentVolumeClaimVolumeSource{ + ClaimName: OSDiskDVName, + }, + }, + }, + }, + { + Name: models.CloudInit, + VolumeSource: virtcorev1.VolumeSource{ + CloudInitNoCloud: &virtcorev1.CloudInitNoCloudSource{ + UserDataSecretRef: &k8scorev1.LocalObjectReference{ + Name: c.Spec.OnPremisesSpec.CloudInitScriptNamespacedName.Name, + }, + }, + }, + }, + { + Name: models.IgnitionDisk, + VolumeSource: virtcorev1.VolumeSource{ + Secret: &virtcorev1.SecretVolumeSource{ + SecretName: ignitionSecretName, + }, + }, + }, + }, + Networks: []virtcorev1.Network{ + { + Name: models.Default, + NetworkSource: virtcorev1.NetworkSource{ + Pod: &virtcorev1.PodNetwork{}, + }, + }, + }, + }, + }, + }, + } + + for i, dvName := range storageDVNames { + diskName := fmt.Sprintf("%s-%d-%s", models.DataDisk, i, vm.Name) + vm.Spec.Template.Spec.Domain.Devices.Disks = append(vm.Spec.Template.Spec.Domain.Devices.Disks, virtcorev1.Disk{ + Name: diskName, + IO: models.Native, + Cache: models.None, + DiskDevice: virtcorev1.DiskDevice{ + Disk: &virtcorev1.DiskTarget{ + Bus: models.Virtio, + }, + }, + Serial: models.DataDiskSerial, + }) + vm.Spec.Template.Spec.Volumes = append(vm.Spec.Template.Spec.Volumes, virtcorev1.Volume{ + Name: diskName, + VolumeSource: virtcorev1.VolumeSource{ + PersistentVolumeClaim: &virtcorev1.PersistentVolumeClaimVolumeSource{ + PersistentVolumeClaimVolumeSource: k8scorev1.PersistentVolumeClaimVolumeSource{ + ClaimName: dvName, + }, + }, + }, + }) + } + + return vm, nil +} + +func (r *CassandraReconciler) reconcileIgnitionScriptSecret( + ctx context.Context, + c *v1beta1.Cassandra, + nodeName, + nodeID, + nodeRack string, +) (string, error) { + ignitionSecret := &k8scorev1.Secret{} + err := r.Get(ctx, types.NamespacedName{ + Namespace: c.Namespace, + Name: fmt.Sprintf("%s-%s", models.IgnitionScriptSecretPrefix, nodeName), + }, ignitionSecret) + if client.IgnoreNotFound(err) != nil { + return "", err + } + if 
k8serrors.IsNotFound(err) { + script, err := r.IcadminAPI.GetIgnitionScript(nodeID) + if err != nil { + return "", err + } + + ignitionSecret = &k8scorev1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: models.SecretKind, + APIVersion: models.K8sAPIVersionV1, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s", models.IgnitionScriptSecretPrefix, nodeName), + Namespace: c.Namespace, + Labels: map[string]string{ + models.ControlledByLabel: c.Name, + models.ClusterIDLabel: c.Status.ID, + models.NodeIDLabel: nodeID, + models.NodeRackLabel: nodeRack, + }, + Finalizers: []string{models.DeletionFinalizer}, + }, + StringData: map[string]string{ + models.Script: script, + }, + } + err = r.Create(ctx, ignitionSecret) + if err != nil { + return "", err + } + } + + return ignitionSecret.Name, nil +} + +func (r *CassandraReconciler) deleteOnPremResources( + ctx context.Context, + c *v1beta1.Cassandra, +) error { + vms := &virtcorev1.VirtualMachineList{} + err := r.List(ctx, vms, &client.ListOptions{ + LabelSelector: labels.SelectorFromSet(map[string]string{ + models.ClusterIDLabel: c.Status.ID, + }), + Namespace: c.Namespace, + }) + if err != nil { + return err + } + + for _, vm := range vms.Items { + err = r.Delete(ctx, &vm) + if err != nil { + return err + } + + patch := client.MergeFrom(vm.DeepCopy()) + controllerutil.RemoveFinalizer(&vm, models.DeletionFinalizer) + err = r.Patch(ctx, &vm, patch) + if err != nil { + return err + } + } + + vmis := &virtcorev1.VirtualMachineInstanceList{} + err = r.List(ctx, vmis, &client.ListOptions{ + LabelSelector: labels.SelectorFromSet(map[string]string{ + models.ClusterIDLabel: c.Status.ID, + }), + Namespace: c.Namespace, + }) + if err != nil { + return err + } + + for _, vmi := range vmis.Items { + err = r.Delete(ctx, &vmi) + if err != nil { + return err + } + + patch := client.MergeFrom(vmi.DeepCopy()) + controllerutil.RemoveFinalizer(&vmi, models.DeletionFinalizer) + err = r.Patch(ctx, &vmi, patch) + if err != nil { + return err + } + } + + dvs := &cdiv1beta1.DataVolumeList{} + err = r.List(ctx, dvs, &client.ListOptions{ + LabelSelector: labels.SelectorFromSet(map[string]string{ + models.ClusterIDLabel: c.Status.ID, + }), + Namespace: c.Namespace, + }) + if err != nil { + return err + } + + for _, dv := range dvs.Items { + err = r.Delete(ctx, &dv) + if err != nil { + return err + } + + patch := client.MergeFrom(dv.DeepCopy()) + controllerutil.RemoveFinalizer(&dv, models.DeletionFinalizer) + err = r.Patch(ctx, &dv, patch) + if err != nil { + return err + } + } + + svcs := &k8scorev1.ServiceList{} + err = r.List(ctx, svcs, &client.ListOptions{ + LabelSelector: labels.SelectorFromSet(map[string]string{ + models.ClusterIDLabel: c.Status.ID, + }), + Namespace: c.Namespace, + }) + if err != nil { + return err + } + + for _, svc := range svcs.Items { + err = r.Delete(ctx, &svc) + if err != nil { + return err + } + + patch := client.MergeFrom(svc.DeepCopy()) + controllerutil.RemoveFinalizer(&svc, models.DeletionFinalizer) + err = r.Patch(ctx, &svc, patch) + if err != nil { + return err + } + } + + secrets := &k8scorev1.SecretList{} + err = r.List(ctx, secrets, &client.ListOptions{ + LabelSelector: labels.SelectorFromSet(map[string]string{ + models.ClusterIDLabel: c.Status.ID, + }), + Namespace: c.Namespace, + }) + if err != nil { + return err + } + + for _, secret := range secrets.Items { + err = r.Delete(ctx, &secret) + if err != nil { + return err + } + + patch := client.MergeFrom(secret.DeepCopy()) + controllerutil.RemoveFinalizer(&secret, 
models.DeletionFinalizer)
+		err = r.Patch(ctx, &secret, patch)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
 func (r *CassandraReconciler) handleExternalDelete(ctx context.Context, c *v1beta1.Cassandra) error {
 	l := log.FromContext(ctx)
diff --git a/controllers/clusters/kafka_controller.go b/controllers/clusters/kafka_controller.go
index e1eca1ce2..1c14d00fc 100644
--- a/controllers/clusters/kafka_controller.go
+++ b/controllers/clusters/kafka_controller.go
@@ -213,7 +213,7 @@ func (r *KafkaReconciler) handleUpdateCluster(
 		return models.ExitReconcile
 	}
 
-	if iKafka.Status.ClusterStatus.State != StatusRUNNING {
+	if iKafka.Status.ClusterStatus.State != models.RunningStatus {
 		l.Error(instaclustr.ClusterNotRunning, "Unable to update cluster, cluster still not running",
 			"cluster name", k.Spec.Name,
 			"cluster state", iKafka.Status.ClusterStatus.State)
diff --git a/controllers/clusters/kafkaconnect_controller.go b/controllers/clusters/kafkaconnect_controller.go
index 3d29f2fcd..3809d79ca 100644
--- a/controllers/clusters/kafkaconnect_controller.go
+++ b/controllers/clusters/kafkaconnect_controller.go
@@ -200,7 +200,7 @@ func (r *KafkaConnectReconciler) handleUpdateCluster(ctx context.Context, kc *v1
 		l.Error(err, "Cannot convert Kafka Connect from Instaclustr",
 			"ClusterID", kc.Status.ID)
 		r.EventRecorder.Eventf(
-			kc, models.Warning, models.ConvertionFailed,
-			"Cluster convertion from the Instaclustr API to k8s resource is failed. Reason: %v",
+			kc, models.Warning, models.ConversionFailed,
+			"Cluster conversion from the Instaclustr API to k8s resource failed. Reason: %v",
 			err,
 		)
diff --git a/controllers/clusters/opensearch_controller.go b/controllers/clusters/opensearch_controller.go
index 6c9448f12..d097d7d7e 100644
--- a/controllers/clusters/opensearch_controller.go
+++ b/controllers/clusters/opensearch_controller.go
@@ -274,7 +274,7 @@ func (r *OpenSearchReconciler) HandleUpdateCluster(
 		"cluster ID", o.Status.ID,
 	)
 
-	r.EventRecorder.Eventf(o, models.Warning, models.ConvertionFailed,
-		"Cluster convertion from the Instaclustr API to k8s resource is failed. Reason: %v", err)
+	r.EventRecorder.Eventf(o, models.Warning, models.ConversionFailed,
+		"Cluster conversion from the Instaclustr API to k8s resource failed. Reason: %v", err)
 
 	return models.ReconcileRequeue
diff --git a/controllers/clusters/postgresql_controller.go b/controllers/clusters/postgresql_controller.go
index fe443c5cb..13aebefa4 100644
--- a/controllers/clusters/postgresql_controller.go
+++ b/controllers/clusters/postgresql_controller.go
@@ -331,7 +331,7 @@ func (r *PostgreSQLReconciler) handleUpdateCluster(
 	)
 
 	r.EventRecorder.Eventf(
-		pg, models.Warning, models.ConvertionFailed,
-		"Cluster convertion from the Instaclustr API to k8s resource is failed. Reason: %v",
+		pg, models.Warning, models.ConversionFailed,
+		"Cluster conversion from the Instaclustr API to k8s resource failed. Reason: %v",
 		err,
 	)
diff --git a/controllers/clusters/redis_controller.go b/controllers/clusters/redis_controller.go
index 18d6b923d..7cacee38e 100644
--- a/controllers/clusters/redis_controller.go
+++ b/controllers/clusters/redis_controller.go
@@ -318,7 +318,7 @@ func (r *RedisReconciler) handleUpdateCluster(
 	)
 
 	r.EventRecorder.Eventf(
-		redis, models.Warning, models.ConvertionFailed,
-		"Cluster convertion from the Instaclustr API to k8s resource is failed. Reason: %v",
+		redis, models.Warning, models.ConversionFailed,
+		"Cluster conversion from the Instaclustr API to k8s resource failed. Reason: %v",
 		err,
 	)
diff --git a/main.go b/main.go
index ac0ecfd6c..88ff4173c 100644
--- a/main.go
+++ b/main.go
@@ -26,6 +26,8 @@ import (
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
 
 	"k8s.io/apimachinery/pkg/runtime"
+	virtcorev1 "kubevirt.io/api/core/v1"
+	cdiv1beta1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
 	// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
 	// to ensure that exec-entrypoint and run can make use of them.
 	_ "k8s.io/client-go/plugin/pkg/client/auth"
@@ -56,6 +58,8 @@ func init() {
 	utilruntime.Must(clustersv1beta1.AddToScheme(scheme))
 	utilruntime.Must(clusterresourcesv1beta1.AddToScheme(scheme))
 	utilruntime.Must(kafkamanagementv1beta1.AddToScheme(scheme))
+	utilruntime.Must(cdiv1beta1.AddToScheme(scheme))
+	utilruntime.Must(virtcorev1.AddToScheme(scheme))
 
 	//+kubebuilder:scaffold:scheme
 }
@@ -109,6 +113,8 @@ func main() {
 	username := os.Getenv("USERNAME")
 	key := os.Getenv("APIKEY")
 	serverHostname := os.Getenv("HOSTNAME")
+	icadminUsername := os.Getenv("ICADMIN_USERNAME")
+	icadminKey := os.Getenv("ICADMIN_APIKEY")
 
 	instaClient := instaclustr.NewClient(
 		username,
@@ -117,6 +123,13 @@ func main() {
 		instaclustr.DefaultTimeout,
 	)
 
+	icadminClient := instaclustr.NewIcadminClient(
+		icadminUsername,
+		icadminKey,
+		serverHostname,
+		instaclustr.DefaultTimeout,
+	)
+
 	s := scheduler.NewScheduler(log.Log.WithValues("component", "scheduler"))
 	eventRecorder := mgr.GetEventRecorderFor("instaclustr-operator")
 
@@ -125,6 +138,7 @@ func main() {
 		Client:        mgr.GetClient(),
 		Scheme:        mgr.GetScheme(),
 		API:           instaClient,
+		IcadminAPI:    icadminClient,
 		Scheduler:     s,
 		EventRecorder: eventRecorder,
 	}).SetupWithManager(mgr); err != nil {
diff --git a/pkg/instaclustr/client.go b/pkg/instaclustr/client.go
index fbfd87979..f37bfc92a 100644
--- a/pkg/instaclustr/client.go
+++ b/pkg/instaclustr/client.go
@@ -38,8 +38,8 @@ type Client struct {
 }
 
 func NewClient(
-	username string,
-	key string,
+	username,
+	key,
 	serverHostname string,
 	timeout time.Duration,
 ) *Client {
diff --git a/pkg/instaclustr/config.go b/pkg/instaclustr/config.go
index 8bd94f499..d23f4159a 100644
--- a/pkg/instaclustr/config.go
+++ b/pkg/instaclustr/config.go
@@ -77,3 +77,13 @@ const (
 	RedisUserIDFmt      = "%s_%s"
 	CassandraBundleUser = "apache_cassandra"
 )
+
+// Endpoint templates for the Instaclustr admin (icadmin) API v1
+const (
+	GatewayEndpoint          = "%s/admin/v1/gateways?term=%s"
+	GatewayPrivateIPEndpoint = "%s/admin/v1/gateways/%s/private-gateway?private_address=%s&gateway_type=SSH"
+	GatewayPublicIPEndpoint  = "%s/admin/v1/gateways/%s/public-gateway?public_address=%s&gateway_type=SSH"
+	NodeIPsEndpoint          = "%s/admin/v1/nodes/%s"
+	NodesEndpoint            = "%s/admin/v1/nodes?term=%s"
+	IgnitionScriptEndpoint   = "%s/admin/v1/nodes/%s/debian-ignition-script"
+)
diff --git a/pkg/instaclustr/icadmin-client.go b/pkg/instaclustr/icadmin-client.go
new file mode 100644
index 000000000..6a2a4ed39
--- /dev/null
+++ b/pkg/instaclustr/icadmin-client.go
@@ -0,0 +1,228 @@
+package instaclustr
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"time"
+
+	"github.com/instaclustr/operator/apis/clusters/v1beta1"
+)
+
+type IcadminClient struct {
+	icadminUsername string
+	icadminKey      string
+	serverHostname  string
+	httpClient      *http.Client
+}
+
+func NewIcadminClient(
+	icadminUsername,
+	icadminKey,
+	serverHostname string,
+	timeout time.Duration,
+) *IcadminClient {
+	httpClient := &http.Client{
+		Timeout:   timeout,
+		Transport: &http.Transport{},
+	}
+	return &IcadminClient{
+		icadminUsername: icadminUsername,
+		icadminKey:      icadminKey,
+		serverHostname:  serverHostname,
+		httpClient:      httpClient,
+	}
+}
+
+func (c *IcadminClient) DoRequest(url, method string, data []byte) (*http.Response, error) {
+	req, err := http.NewRequest(method, url, bytes.NewBuffer(data))
+	if err != nil {
+		return nil, err
+	}
+	req.SetBasicAuth(c.icadminUsername, c.icadminKey)
+	req.Header.Set("Content-Type", "application/json")
+	
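// identify operator traffic to the admin API
+	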
req.Header.Set("Instaclustr-Source", OperatorVersion) + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + + return resp, nil +} + +func (c *IcadminClient) GetIgnitionScript(nodeID string) (string, error) { + url := fmt.Sprintf(IgnitionScriptEndpoint, c.serverHostname, nodeID) + resp, err := c.DoRequest(url, http.MethodGet, nil) + if err != nil { + return "", err + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return "", err + } + + if resp.StatusCode == http.StatusNotFound { + return "", NotFound + } + + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("status code: %d, message: %s", resp.StatusCode, body) + } + + holder := struct { + Script string `json:"script"` + }{} + + err = json.Unmarshal(body, &holder) + if err != nil { + return "", err + } + + return holder.Script, nil +} + +func (c *IcadminClient) GetGateways(cdcID string) ([]*v1beta1.Gateway, error) { + url := fmt.Sprintf(GatewayEndpoint, c.serverHostname, cdcID) + resp, err := c.DoRequest(url, http.MethodGet, nil) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if resp.StatusCode == http.StatusNotFound { + return nil, NotFound + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("status code: %d, message: %s", resp.StatusCode, body) + } + + holder := struct { + Gateways []*v1beta1.Gateway `json:"gateways"` + }{} + + err = json.Unmarshal(body, &holder) + if err != nil { + return nil, err + } + + return holder.Gateways, nil +} + +func (c *IcadminClient) GetOnPremisesNodes(clusterID string) ([]*v1beta1.OnPremiseNode, error) { + url := fmt.Sprintf(NodesEndpoint, c.serverHostname, clusterID) + resp, err := c.DoRequest(url, http.MethodGet, nil) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if resp.StatusCode == http.StatusNotFound { + return nil, NotFound + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("status code: %d, message: %s", resp.StatusCode, body) + } + + holder := struct { + Nodes []*v1beta1.OnPremiseNode `json:"nodes"` + }{} + + err = json.Unmarshal(body, &holder) + if err != nil { + return nil, err + } + + return holder.Nodes, nil +} + +func (c *IcadminClient) SetPrivateGatewayIP(gatewayID, ip string) error { + url := fmt.Sprintf(GatewayPrivateIPEndpoint, c.serverHostname, gatewayID, ip) + + resp, err := c.DoRequest(url, http.MethodPut, nil) + if err != nil { + return err + } + + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("status code: %d, message: %s", resp.StatusCode, body) + } + + return nil +} + +func (c *IcadminClient) SetPublicGatewayIP(gatewayID, ip string) error { + url := fmt.Sprintf(GatewayPublicIPEndpoint, c.serverHostname, gatewayID, ip) + + resp, err := c.DoRequest(url, http.MethodPut, nil) + if err != nil { + return err + } + + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("status code: %d, message: %s", resp.StatusCode, body) + } + + return nil +} + +func (c *IcadminClient) SetNodeIPs(nodeID string, request *v1beta1.OnPremiseNode) error { + url := fmt.Sprintf(NodeIPsEndpoint, c.serverHostname, nodeID) + + holder := struct { + Updates *v1beta1.OnPremiseNode 
`json:"updates"` + }{ + Updates: request, + } + + data, err := json.Marshal(holder) + if err != nil { + return err + } + + resp, err := c.DoRequest(url, http.MethodPut, data) + if err != nil { + return err + } + + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("status code: %d, message: %s", resp.StatusCode, body) + } + + return nil +} diff --git a/pkg/instaclustr/interfaces.go b/pkg/instaclustr/interfaces.go index fca23f78d..2b2cb8e5f 100644 --- a/pkg/instaclustr/interfaces.go +++ b/pkg/instaclustr/interfaces.go @@ -104,3 +104,12 @@ type API interface { DeleteAWSEndpointServicePrincipal(principalID string) error GetResizeOperationsByClusterDataCentreID(cdcID string) ([]*v1beta1.ResizeOperation, error) } + +type IcadminAPI interface { + GetGateways(cdcID string) ([]*v1beta1.Gateway, error) + GetOnPremisesNodes(clusterID string) ([]*v1beta1.OnPremiseNode, error) + SetPrivateGatewayIP(gatewayID, ip string) error + SetPublicGatewayIP(gatewayID, ip string) error + SetNodeIPs(nodeID string, request *v1beta1.OnPremiseNode) error + GetIgnitionScript(nodeID string) (string, error) +} diff --git a/pkg/instaclustr/mock/client.go b/pkg/instaclustr/mock/client.go index 92da8f4c6..6889826b8 100644 --- a/pkg/instaclustr/mock/client.go +++ b/pkg/instaclustr/mock/client.go @@ -375,3 +375,32 @@ func (c *mockClient) GetResizeOperationsByClusterDataCentreID(cdcID string) ([]* func (c *mockClient) GetAWSVPCPeering(peerID string) (*models.AWSVPCPeering, error) { panic("GetAWSVPCPeering: is not implemented") } + +// +//func (c *mockClient) CreateOnPremisesCluster(url string, cluster any) (string, error) { +// panic("CreateOnPremisesCluster: is not implemented)") +//} + +func (c *mockClient) GetGateways(cdcID string) ([]*clustersv1beta1.Gateway, error) { + panic("GetGateways: is not implemented") +} + +func (c *mockClient) GetOnPremisesNodes(clusterID string) ([]*clustersv1beta1.OnPremiseNode, error) { + panic("GetOnPremisesNodes: is not implemented") +} + +func (c *mockClient) SetPrivateGatewayIP(gatewayID, ip string) error { + panic("SetPrivateGatewayIP: is not implemented") +} + +func (c *mockClient) SetPublicGatewayIP(gatewayID, ip string) error { + panic("SetPublicGatewayIP: is not implemented") +} + +func (c *mockClient) SetNodeIPs(nodeID string, request *clustersv1beta1.OnPremiseNode) error { + panic("SetNodeIPs: is not implemented") +} + +func (c *mockClient) GetIgnitionScript(nodeID string) (string, error) { + panic("GetIgnitionScript: is not implemented") +} diff --git a/pkg/models/on_premises.go b/pkg/models/on_premises.go new file mode 100644 index 000000000..d7d013b56 --- /dev/null +++ b/pkg/models/on_premises.go @@ -0,0 +1,42 @@ +package models + +const ( + ONPREMISES = "ONPREMISES" + CLIENTDC = "CLIENT_DC" + + VirtualMachineKind = "VirtualMachine" + DVKind = "DataVolume" + ServiceKind = "Service" + KubevirtV1APIVersion = "kubevirt.io/v1" + CDIKubevirtV1beta1APIVersion = "cdi.kubevirt.io/v1beta1" + + KubevirtDomainLabel = "kubevirt.io/domain" + NodeIDLabel = "nodeID" + NodeRackLabel = "nodeRack" + NodeOSDVPrefix = "node-os-data-volume-pvc" + NodeDVPrefix = "node-data-volume-pvc" + NodeVMPrefix = "node-vm" + NodeSvcPrefix = "node-service" + GatewayDVPrefix = "gateway-data-volume-pvc" + GatewayVMPrefix = "gateway-vm" + GatewaySvcPrefix = "gateway-service" + IgnitionScriptSecretPrefix = "ignition-script-secret" + DataDisk = "data-disk" + + Boot = "boot" + Storage = "storage" + CPU = "cpu" + 
Memory = "memory" + Virtio = "virtio" + Native = "native" + None = "none" + Script = "script" + IgnitionDisk = "ignition" + Default = "default" + CloudInit = "cloud-init" + DataDiskSerial = "DATADISK" + IgnitionSerial = "IGNITION" + + LBType = "LoadBalancer" + Port22 = 22 +) diff --git a/pkg/models/operator.go b/pkg/models/operator.go index a5950fe77..88bc10b11 100644 --- a/pkg/models/operator.go +++ b/pkg/models/operator.go @@ -35,7 +35,7 @@ const ( ClusterIDLabel = "instaclustr.com/clusterID" ClusterNameLabel = "instaclustr.com/clusterName" ClustersV1beta1APIVersion = "clusters.instaclustr.com/v1beta1" - ClusterresourcesV1beta1APIVersion = "clusterresources.instaclustr.com/v1beta1" + ClusterResourcesV1beta1APIVersion = "clusterresources.instaclustr.com/v1beta1" RedisUserNamespaceLabel = "instaclustr.com/redisUserNamespace" PostgreSQLUserNamespaceLabel = "instaclustr.com/postgresqlUserNamespace" OpenSearchUserNamespaceLabel = "instaclustr.com/openSearchUserNamespace" @@ -132,7 +132,7 @@ const ( CreationFailed = "CreationFailed" FetchFailed = "FetchFailed" GenerateFailed = "GenerateFailed" - ConvertionFailed = "ConvertionFailed" + ConversionFailed = "ConversionFailed" ValidationFailed = "ValidationFailed" UpdateFailed = "UpdateFailed" ExternalChanges = "ExternalChanges" diff --git a/pkg/models/validation.go b/pkg/models/validation.go index f1b754b42..7bd6f8980 100644 --- a/pkg/models/validation.go +++ b/pkg/models/validation.go @@ -58,6 +58,12 @@ var ( S3URIRegExp = "^s3:\\/\\/[a-zA-Z0-9_-]+[^\\/]$" DependencyVPCs = []string{"TARGET_VPC", "VPC_PEERED", "SEPARATE_VPC"} EncryptionKeyAliasRegExp = "^[a-zA-Z0-9_-]{1}[a-zA-Z0-9 _-]*$" + MemoryRegExp = "^\\d+(Ei|Pi|Ti|Gi|Mi|Ki)?$" + StorageRegExp = "^\\d+(Gi|Ti|Pi|Ei)?$" + + CassandraOnPremNodes = []string{"CAS-PRD-OP.4.8-400", "CAS-PRD-OP.4.8-200", "CAS-PRD-OP.8.16-200", + "CAS-PRD-OP.8.16-400", "CAS-PRD-OP.16.32-400", "CAS-PRD-OP.16.32-200", "CAS-PRD-OP.32.64-200", + "CAS-PRD-OP.32.64-400"} CassandraReplicationFactors = []int{2, 3, 5} KafkaReplicationFactors = []int{3, 5} diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go index 0d025b664..dda9835e0 100644 --- a/pkg/scheduler/scheduler.go +++ b/pkg/scheduler/scheduler.go @@ -24,13 +24,18 @@ import ( "github.com/go-logr/logr" ) -var ClusterStatusInterval time.Duration -var ClusterBackupsInterval time.Duration -var UserCreationInterval time.Duration +var ( + ClusterStatusInterval time.Duration + ClusterBackupsInterval time.Duration + UserCreationInterval time.Duration +) -const StatusChecker = "statusChecker" -const BackupsChecker = "backupsChecker" -const UserCreator = "userCreator" +const ( + StatusChecker = "statusChecker" + BackupsChecker = "backupsChecker" + UserCreator = "userCreator" + OnPremisesIPsChecker = "onPremisesIPsChecker" +) type Job func() error diff --git a/scripts/cloud-init-script-example.sh b/scripts/cloud-init-script-example.sh new file mode 100644 index 000000000..d864bd252 --- /dev/null +++ b/scripts/cloud-init-script-example.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +export NEW_PASS="qwerty12345" +export SSH_PUB_KEY="" +export BOOTSTRAP_SSH_KEY="" + +echo "debian:$NEW_PASS" | chpasswd +echo "root:$NEW_PASS" | sudo chpasswd root +sudo echo "$SSH_PUB_KEY" >> /home/debian/.ssh/authorized_keys +sudo echo "$BOOTSTRAP_SSH_KEY" >> /home/debian/.ssh/authorized_keys +sudo chown -R debian: /home/debian/.ssh +sudo cp /usr/share/doc/apt/examples/sources.list /etc/apt/sources.list +data_device=$(lsblk -dfn -o NAME,SERIAL | awk '$2 == "DATADISK" {print $1}') +sudo mkfs -t 
ext4 /dev/"${data_device}" +ignition_device=$(lsblk -dfn -o NAME,SERIAL | awk '$2 == "IGNITION" {print $1}') +sudo mkdir /ignition +sudo mount /dev/"${ignition_device}" /ignition/ +sudo cp /ignition/script /ignition.sh +sudo chmod +x /ignition.sh +/ignition.sh +END \ No newline at end of file diff --git a/scripts/cloud-init-secret.yaml b/scripts/cloud-init-secret.yaml new file mode 100644 index 000000000..ccbe8f6ff --- /dev/null +++ b/scripts/cloud-init-secret.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Secret +metadata: + name: instaclustr-cloud-init-secret +data: + userdata: \ No newline at end of file diff --git a/scripts/make_creds_secret.sh b/scripts/make_creds_secret.sh index b87a59b65..926fa1252 100755 --- a/scripts/make_creds_secret.sh +++ b/scripts/make_creds_secret.sh @@ -6,6 +6,8 @@ path=$(readlink -f ../.env) export USERNAME=$(echo -n $USERNAME | base64) export APIKEY=$(echo -n $APIKEY | base64) export HOSTNAME=$(echo -n $HOSTNAME | base64) +export ICADMIN_USERNAME=$(echo -n $ICADMIN_USERNAME | base64) +export ICADMIN_APIKEY=$(echo -n $ICADMIN_APIKEY | base64) ( echo "cat <../config/manager/creds_secret.yaml"; cat secret.yaml; diff --git a/scripts/secret.yaml b/scripts/secret.yaml index 08212ba02..83b4e248a 100644 --- a/scripts/secret.yaml +++ b/scripts/secret.yaml @@ -8,3 +8,5 @@ data: USERNAME: $USERNAME APIKEY: $APIKEY HOSTNAME: $HOSTNAME + ICADMIN_USERNAME: $ICADMIN_USERNAME + ICADMIN_APIKEY: $ICADMIN_APIKEY