diff --git a/.golangci.yml b/.golangci.yml index 57a988d69..78cd3aab8 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -41,6 +41,7 @@ linters-settings: - "github.com/kelseyhightower/envconfig" - "github.com/red-hat-storage/ocs-operator" - "github.com/stmcginnis/gofish" + - "github.com/stolostron/multiclusterhub-operator/api/v1" revive: rules: - name: indent-error-flow @@ -124,13 +125,10 @@ output: issues: include: - EXC0002 # disable excluding of issues about comments from golint - - EXC0012 # EXC0012 revive: Annoying issue about not having a comment. The rare codebase has such comments - - EXC0014 # EXC0014 revive: Annoying issue about not having a comment. The rare codebase has such comments + - EXC0012 # EXC0012 revive: Annoying issue about not having a comment. The rare codebase has such comments + - EXC0014 # EXC0014 revive: Annoying issue about not having a comment. The rare codebase has such comments exclude-rules: #- # Exclude some linters from running on tests files. - - path: 'pkg/polarion' + - path: "pkg/polarion" linters: - exhaustive - - - diff --git a/go.mod b/go.mod index 11878ffac..3f8412db4 100644 --- a/go.mod +++ b/go.mod @@ -49,9 +49,11 @@ require ( github.com/kelseyhightower/envconfig v1.4.0 github.com/openshift/assisted-service/api v0.0.0 github.com/openshift/assisted-service/models v0.0.0 + github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87 github.com/openshift/elasticsearch-operator v0.0.0-20220613183908-e1648e67c298 github.com/red-hat-storage/ocs-operator v0.4.13 github.com/stmcginnis/gofish v0.15.0 + github.com/stolostron/multiclusterhub-operator v0.0.0-20220516144733-74b7bd46ac55 github.com/stretchr/testify v1.9.0 github.com/vmware-tanzu/velero v1.12.1 golang.org/x/crypto v0.23.0 @@ -73,6 +75,9 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver v3.5.1+incompatible // indirect github.com/blang/semver/v4 v4.0.0 // indirect + github.com/bshuster-repo/logrus-logstash-hook v1.0.2 // indirect + github.com/bugsnag/bugsnag-go v2.1.2+incompatible // indirect + github.com/bugsnag/panicwrap v1.3.4 // indirect github.com/cenkalti/backoff/v3 v3.2.2 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/chai2010/gettext-go v1.0.2 // indirect @@ -110,6 +115,7 @@ require ( github.com/go-openapi/validate v0.22.0 // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/go-test/deep v1.1.0 // indirect + github.com/gofrs/uuid v4.2.0+incompatible // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect @@ -165,7 +171,6 @@ require ( github.com/oklog/ulid v1.3.1 // indirect github.com/onsi/gomega v1.31.1 // indirect github.com/openshift/assisted-service v1.0.10-0.20230830164851-6573b5d7021d // indirect - github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87 // indirect github.com/openshift/library-go v0.0.0-20231027143522-b8cd45d2d2c8 // indirect github.com/operator-framework/operator-registry v1.35.0 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect diff --git a/go.sum b/go.sum index 76cf3e2cd..46f9f662f 100644 --- a/go.sum +++ b/go.sum @@ -702,12 +702,13 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/boombuler/barcode 
v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= -github.com/bshuster-repo/logrus-logstash-hook v0.4.1 h1:pgAtgj+A31JBVtEHu2uHuEx0n+2ukqUJnS2vVe5pQNA= -github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= -github.com/bugsnag/bugsnag-go v1.5.3 h1:yeRUT3mUE13jL1tGwvoQsKdVbAsQx9AJ+fqahKveP04= -github.com/bugsnag/bugsnag-go v1.5.3/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= -github.com/bugsnag/panicwrap v1.2.0 h1:OzrKrRvXis8qEvOkfcxNcYbOd2O7xXS2nnKMEMABFQA= -github.com/bugsnag/panicwrap v1.2.0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/bshuster-repo/logrus-logstash-hook v1.0.2 h1:JYRWo+QGnQdedgshosug9hxpPYTB9oJ1ZZD3fY31alU= +github.com/bshuster-repo/logrus-logstash-hook v1.0.2/go.mod h1:HgYntJprnHSPaF9VPPPLP1L5S1vMWxRfa1J+vzDrDTw= +github.com/bugsnag/bugsnag-go v2.1.2+incompatible h1:E7dor84qzwUO8KdCM68CZwq9QOSR7HXlLx3Wj5vui2s= +github.com/bugsnag/bugsnag-go v2.1.2+incompatible/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/panicwrap v1.3.4 h1:A6sXFtDGsgU/4BLf5JT0o5uYg3EeKgGx3Sfs+/uk3pU= +github.com/bugsnag/panicwrap v1.3.4/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= @@ -1005,8 +1006,8 @@ github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY9 github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/godbus/dbus v0.0.0-20181025153459-66d97aec3384/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/uuid v3.3.0+incompatible h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6rR7UHz84= -github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0= +github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= @@ -1677,6 +1678,8 @@ github.com/stmcginnis/gofish v0.15.0 h1:8TG41+lvJk/0Nf8CIIYErxbMlQUy80W0JFRZP3Ld github.com/stmcginnis/gofish v0.15.0/go.mod h1:BLDSFTp8pDlf/xDbLZa+F7f7eW0E/CHCboggsu8CznI= github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/stolostron/multiclusterhub-operator v0.0.0-20220516144733-74b7bd46ac55 h1:7+GfRgTFlILbeLbnvCO/KAFhNn9RoWGnOcwL21f1nxI= +github.com/stolostron/multiclusterhub-operator v0.0.0-20220516144733-74b7bd46ac55/go.mod h1:MOs4J7cOEv55UQypbSNxshR9nEzyuR3ohRs7EVt4Ato= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 
 github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
diff --git a/pkg/clients/clients.go b/pkg/clients/clients.go
index 598f56e28..e5c4d3a64 100644
--- a/pkg/clients/clients.go
+++ b/pkg/clients/clients.go
@@ -67,6 +67,7 @@ import (
 	agentInstallV1Beta1 "github.com/openshift/assisted-service/api/v1beta1"
 	hiveV1 "github.com/openshift/hive/apis/hive/v1"
 	moduleV1Beta1 "github.com/rh-ecosystem-edge/kernel-module-management/api/v1beta1"
+	mchv1 "github.com/stolostron/multiclusterhub-operator/api/v1"
 	"k8s.io/client-go/kubernetes/scheme"
 	coreV1Client "k8s.io/client-go/kubernetes/typed/core/v1"
 	storageV1Client "k8s.io/client-go/kubernetes/typed/storage/v1"
@@ -269,6 +270,10 @@ func SetScheme(crScheme *runtime.Scheme) error {
 		return err
 	}
 
+	if err := mchv1.AddToScheme(crScheme); err != nil {
+		return err
+	}
+
 	if err := configV1.Install(crScheme); err != nil {
 		return err
 	}
@@ -514,6 +519,8 @@ func GetTestClients(tcp TestClientParams) *Settings {
 		genericClientObjects = append(genericClientObjects, v)
 	case *performanceV2.PerformanceProfile:
 		genericClientObjects = append(genericClientObjects, v)
+	case *mchv1.MultiClusterHub:
+		genericClientObjects = append(genericClientObjects, v)
 	case *tunedv1.Tuned:
 		genericClientObjects = append(genericClientObjects, v)
 	case *agentInstallV1Beta1.AgentServiceConfig:
diff --git a/pkg/multiclusterhub/multiclusterhub.go b/pkg/multiclusterhub/multiclusterhub.go
new file mode 100644
index 000000000..3ac122059
--- /dev/null
+++ b/pkg/multiclusterhub/multiclusterhub.go
@@ -0,0 +1,219 @@
+package multiclusterhub
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/golang/glog"
+	"github.com/openshift-kni/eco-goinfra/pkg/clients"
+	"github.com/openshift-kni/eco-goinfra/pkg/msg"
+	k8serrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	mchv1 "github.com/stolostron/multiclusterhub-operator/api/v1"
+	goclient "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// MultiClusterHubBuilder provides struct for the MultiClusterHub object containing connection to
+// the cluster and the MultiClusterHub definitions.
+type MultiClusterHubBuilder struct {
+	Definition *mchv1.MultiClusterHub
+	Object     *mchv1.MultiClusterHub
+	errorMsg   string
+	apiClient  goclient.Client
+}
+
+// NewMultiClusterHubBuilder creates a new instance of MultiClusterHubBuilder.
+func NewMultiClusterHubBuilder(apiClient *clients.Settings, name, namespace string) *MultiClusterHubBuilder {
+	glog.V(100).Infof(
+		`Initializing new MultiClusterHub structure with the following params: name: %s, namespace: %s`,
+		name, namespace)
+
+	if apiClient == nil {
+		glog.V(100).Infof("apiClient cannot be nil")
+
+		return nil
+	}
+
+	builder := MultiClusterHubBuilder{
+		apiClient: apiClient.Client,
+		Definition: &mchv1.MultiClusterHub{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      name,
+				Namespace: namespace,
+			},
+		},
+	}
+
+	if name == "" {
+		glog.V(100).Infof("The name of the MultiClusterHub is empty")
+
+		return nil
+	}
+
+	if namespace == "" {
+		glog.V(100).Infof("The namespace of the MultiClusterHub is empty")
+
+		return nil
+	}
+
+	return &builder
+}
+
+// PullMultiClusterHub loads an existing MultiClusterHub into MultiClusterHubBuilder struct.
+func PullMultiClusterHub(apiClient *clients.Settings, name, namespace string) (*MultiClusterHubBuilder, error) {
+	glog.V(100).Infof("Pulling existing MultiClusterHub name: %s from namespace %s", name, namespace)
+
+	if apiClient == nil {
+		return nil, fmt.Errorf("apiClient cannot be nil")
+	}
+
+	builder := MultiClusterHubBuilder{
+		apiClient: apiClient.Client,
+		Definition: &mchv1.MultiClusterHub{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      name,
+				Namespace: namespace,
+			},
+		},
+	}
+
+	if name == "" {
+		builder.errorMsg = "MultiClusterHub 'name' cannot be empty"
+
+		return &builder, nil
+	}
+
+	if namespace == "" {
+		builder.errorMsg = "MultiClusterHub 'namespace' cannot be empty"
+
+		return &builder, nil
+	}
+
+	if !builder.Exists() {
+		return nil, fmt.Errorf("MultiClusterHub object %s does not exist", name)
+	}
+
+	builder.Definition = builder.Object
+
+	return &builder, nil
+}
+
+// Get fetches the defined MultiClusterHub from the cluster.
+func (builder *MultiClusterHubBuilder) Get() (*mchv1.MultiClusterHub, error) {
+	if valid, err := builder.validate(); !valid {
+		return nil, err
+	}
+
+	glog.V(100).Infof("Getting MultiClusterHub %s from namespace %s",
+		builder.Definition.Name, builder.Definition.Namespace)
+
+	multiClusterHub := &mchv1.MultiClusterHub{}
+	err := builder.apiClient.Get(context.TODO(), goclient.ObjectKey{
+		Name:      builder.Definition.Name,
+		Namespace: builder.Definition.Namespace,
+	}, multiClusterHub)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return multiClusterHub, err
+}
+
+// Update modifies an existing MultiClusterHub on the cluster.
+func (builder *MultiClusterHubBuilder) Update() (*MultiClusterHubBuilder, error) {
+	if valid, err := builder.validate(); !valid {
+		return builder, err
+	}
+
+	glog.V(100).Infof("Updating MultiClusterHub %s in the namespace %s",
+		builder.Definition.Name, builder.Definition.Namespace)
+
+	exists := builder.Exists()
+	if !exists {
+		return builder, fmt.Errorf("MultiClusterHub object does not exist")
+	}
+
+	err := builder.apiClient.Update(context.TODO(), builder.Definition)
+	if err == nil {
+		builder.Object = builder.Definition
+	}
+
+	return builder, err
+}
+
+// Delete removes a MultiClusterHub from the cluster.
+func (builder *MultiClusterHubBuilder) Delete() error {
+	if valid, err := builder.validate(); !valid {
+		return err
+	}
+
+	glog.V(100).Infof("Deleting the MultiClusterHub %s in the namespace %s",
+		builder.Definition.Name, builder.Definition.Namespace)
+
+	if !builder.Exists() {
+		return nil
+	}
+
+	err := builder.apiClient.Delete(context.TODO(), builder.Definition)
+
+	if err != nil {
+		return fmt.Errorf("cannot delete MultiClusterHub: %w", err)
+	}
+
+	builder.Object = nil
+	builder.Definition.ResourceVersion = ""
+	builder.Definition.CreationTimestamp = metav1.Time{}
+
+	return nil
+}
+
+// Exists checks if the defined MultiClusterHub has already been created.
+func (builder *MultiClusterHubBuilder) Exists() bool {
+	if valid, _ := builder.validate(); !valid {
+		return false
+	}
+
+	glog.V(100).Infof("Checking if MultiClusterHub %s in namespace %s exists",
+		builder.Definition.Name, builder.Definition.Namespace)
+
+	var err error
+	builder.Object, err = builder.Get()
+
+	return err == nil || !k8serrors.IsNotFound(err)
+}
+
+// validate will check that the builder and builder definition are properly initialized before
+// accessing any member fields.
+func (builder *MultiClusterHubBuilder) validate() (bool, error) { + resourceCRD := "MultiClusterHub" + + if builder == nil { + glog.V(100).Infof("The %s builder is uninitialized", resourceCRD) + + return false, fmt.Errorf("error: received nil %s builder", resourceCRD) + } + + if builder.Definition == nil { + glog.V(100).Infof("The %s is undefined", resourceCRD) + + builder.errorMsg = msg.UndefinedCrdObjectErrString(resourceCRD) + } + + if builder.apiClient == nil { + glog.V(100).Infof("The %s builder apiclient is nil", resourceCRD) + + builder.errorMsg = fmt.Sprintf("%s builder cannot have nil apiClient", resourceCRD) + } + + if builder.errorMsg != "" { + glog.V(100).Infof("The %s builder has error message: %s", resourceCRD, builder.errorMsg) + + return false, fmt.Errorf(builder.errorMsg) + } + + return true, nil +} diff --git a/pkg/multiclusterhub/multiclusterhub_test.go b/pkg/multiclusterhub/multiclusterhub_test.go new file mode 100644 index 000000000..296f4a3a9 --- /dev/null +++ b/pkg/multiclusterhub/multiclusterhub_test.go @@ -0,0 +1,383 @@ +package multiclusterhub + +import ( + "fmt" + "testing" + + "github.com/openshift-kni/eco-goinfra/pkg/clients" + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + mchv1 "github.com/stolostron/multiclusterhub-operator/api/v1" +) + +var ( + testValue string = "test" +) + +func TestMultiClusterHubPull(t *testing.T) { + testCases := []struct { + expectedError error + multiClusterHubName string + multiClusterHubNamespace string + addToRuntimeObjects bool + }{ + { + expectedError: nil, + multiClusterHubName: testValue, + multiClusterHubNamespace: testValue, + addToRuntimeObjects: true, + }, + + { + expectedError: nil, + multiClusterHubName: "", + multiClusterHubNamespace: testValue, + addToRuntimeObjects: true, + }, + + { + expectedError: nil, + multiClusterHubName: testValue, + multiClusterHubNamespace: "", + addToRuntimeObjects: true, + }, + + { + expectedError: nil, + multiClusterHubName: "", + multiClusterHubNamespace: "", + addToRuntimeObjects: true, + }, + } + + for _, testCase := range testCases { + var ( + runtimeObjects []runtime.Object + testSettings *clients.Settings + ) + + testMultiClusterHub := generateMultiClusterHub( + testCase.multiClusterHubName, testCase.multiClusterHubNamespace) + + if testCase.addToRuntimeObjects { + runtimeObjects = append(runtimeObjects, testMultiClusterHub) + } + + testSettings = clients.GetTestClients(clients.TestClientParams{ + K8sMockObjects: runtimeObjects, + }) + + // Test the PullMultiClusterHub function + builderResult, err := PullMultiClusterHub(testSettings, + testCase.multiClusterHubName, testCase.multiClusterHubNamespace) + + // Check the error + assert.Equal(t, err, testCase.expectedError) + + if testCase.expectedError == nil { + assert.NotNil(t, builderResult) + } + } +} + +func TestMultiClusterHubGet(t *testing.T) { + testCases := []struct { + expectedError error + multiClusterHubName string + multiClusterHubNamespace string + addToRuntimeObjects bool + }{ + { + expectedError: nil, + multiClusterHubName: testValue, + multiClusterHubNamespace: testValue, + addToRuntimeObjects: true, + }, + + { + expectedError: fmt.Errorf("error: received nil MultiClusterHub builder"), + multiClusterHubName: testValue, + multiClusterHubNamespace: testValue, + addToRuntimeObjects: false, + }, + + { + expectedError: fmt.Errorf("MultiClusterHub 'name' cannot be empty"), + multiClusterHubName: "", + multiClusterHubNamespace: testValue, + 
addToRuntimeObjects: true, + }, + } + + for _, testCase := range testCases { + var ( + runtimeObjects []runtime.Object + testSettings *clients.Settings + ) + + testMultiClusterHub := generateMultiClusterHub( + testCase.multiClusterHubName, testCase.multiClusterHubNamespace) + + if testCase.addToRuntimeObjects { + runtimeObjects = append(runtimeObjects, testMultiClusterHub) + } + + testSettings = clients.GetTestClients(clients.TestClientParams{ + K8sMockObjects: runtimeObjects, + }) + + builder, err := PullMultiClusterHub(testSettings, + testCase.multiClusterHubName, testCase.multiClusterHubNamespace) + if testCase.expectedError == nil { + assert.Nil(t, err) + } + + // Test the Get function + builderResult, err := builder.Get() + + // Check the error + assert.Equal(t, err, testCase.expectedError) + + if testCase.expectedError == nil { + assert.NotNil(t, builderResult) + } + } +} + +func TestMultiClusterHubUpdate(t *testing.T) { + testCases := []struct { + expectedError error + addToRuntimeObjects bool + multiClusterHubName string + multiClusterHubNamespace string + newImageName string + }{ + { + expectedError: nil, + addToRuntimeObjects: true, + multiClusterHubName: testValue, + multiClusterHubNamespace: testValue, + newImageName: "new-image", + }, + } + + for _, testCase := range testCases { + var ( + runtimeObjects []runtime.Object + testSettings *clients.Settings + ) + + testMultiClusterHub := generateMultiClusterHub( + testCase.multiClusterHubName, testCase.multiClusterHubNamespace) + + if testCase.addToRuntimeObjects { + runtimeObjects = append(runtimeObjects, testMultiClusterHub) + } + + testSettings = clients.GetTestClients(clients.TestClientParams{ + K8sMockObjects: runtimeObjects, + }) + + builder, err := PullMultiClusterHub(testSettings, + testCase.multiClusterHubName, testCase.multiClusterHubNamespace) + assert.Nil(t, err) + + // Test the Update function + builder.Definition.Spec.ImagePullSecret = testCase.newImageName + builderResult, err := builder.Update() + + // Check the error + assert.Equal(t, err, testCase.expectedError) + + if testCase.expectedError == nil { + assert.Equal(t, builderResult.Object.Spec.ImagePullSecret, testCase.newImageName) + } + } +} + +func TestMultiClusterHubDelete(t *testing.T) { + testCases := []struct { + expectedError error + addToRuntimeObjects bool + }{ + { + expectedError: nil, + addToRuntimeObjects: true, + }, + { + expectedError: fmt.Errorf("error: received nil MultiClusterHub builder"), + addToRuntimeObjects: false, + }, + } + + for _, testCase := range testCases { + var ( + runtimeObjects []runtime.Object + testSettings *clients.Settings + ) + + testMultiClusterHub := generateMultiClusterHub( + testValue, testValue) + + if testCase.addToRuntimeObjects { + runtimeObjects = append(runtimeObjects, testMultiClusterHub) + } + + testSettings = clients.GetTestClients(clients.TestClientParams{ + K8sMockObjects: runtimeObjects, + }) + + builder, err := PullMultiClusterHub(testSettings, + testValue, testValue) + if testCase.expectedError == nil { + assert.Nil(t, err) + } + + // Test the Delete function + err = builder.Delete() + + // Check the error + assert.Equal(t, err, testCase.expectedError) + } +} + +func TestMultiClusterHubExists(t *testing.T) { + testCases := []struct { + expectedExists bool + addToRuntimeObjects bool + expectedError error + }{ + { + expectedExists: true, + addToRuntimeObjects: true, + expectedError: nil, + }, + { + expectedExists: false, + addToRuntimeObjects: false, + expectedError: fmt.Errorf("MultiClusterHub object test 
does not exist"), + }, + } + + for _, testCase := range testCases { + var ( + runtimeObjects []runtime.Object + testSettings *clients.Settings + ) + + testMultiClusterHub := generateMultiClusterHub( + testValue, testValue) + + if testCase.addToRuntimeObjects { + runtimeObjects = append(runtimeObjects, testMultiClusterHub) + } + + testSettings = clients.GetTestClients(clients.TestClientParams{ + K8sMockObjects: runtimeObjects, + }) + + builder, err := PullMultiClusterHub(testSettings, + testValue, testValue) + if testCase.expectedError == nil { + assert.Nil(t, err) + } + + // Test the Exists function + result := builder.Exists() + + // Check the result + assert.Equal(t, result, testCase.expectedExists) + } +} + +func TestMultiClusterHubValidate(t *testing.T) { + testCases := []struct { + builderNil bool + definitionNil bool + apiClientNil bool + expectedError error + }{ + { + builderNil: true, + definitionNil: false, + apiClientNil: false, + expectedError: fmt.Errorf("error: received nil MultiClusterHub builder"), + }, + { + builderNil: false, + definitionNil: true, + apiClientNil: false, + expectedError: fmt.Errorf("can not redefine the undefined MultiClusterHub"), + }, + { + builderNil: false, + definitionNil: false, + apiClientNil: true, + expectedError: fmt.Errorf("MultiClusterHub builder cannot have nil apiClient"), + }, + { + builderNil: false, + definitionNil: false, + apiClientNil: false, + expectedError: nil, + }, + } + + for _, testCase := range testCases { + var ( + runtimeObjects []runtime.Object + testSettings *clients.Settings + ) + + testMultiClusterHub := generateMultiClusterHub(testValue, testValue) + + runtimeObjects = append(runtimeObjects, testMultiClusterHub) + + testSettings = clients.GetTestClients(clients.TestClientParams{ + K8sMockObjects: runtimeObjects, + }) + + testBuilder, err := PullMultiClusterHub(testSettings, + testValue, testValue) + + if testCase.expectedError == nil { + assert.Nil(t, err) + } + + if testCase.builderNil { + testBuilder = nil + } + + if testCase.definitionNil { + testBuilder.Definition = nil + } + + if testCase.apiClientNil { + testBuilder.apiClient = nil + } + + result, err := testBuilder.validate() + if testCase.expectedError != nil { + assert.NotNil(t, err) + assert.Equal(t, testCase.expectedError, err) + assert.False(t, result) + } else { + assert.Nil(t, err) + assert.True(t, result) + } + } +} + +func generateMultiClusterHub(name, namespace string) *mchv1.MultiClusterHub { + return &mchv1.MultiClusterHub{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: mchv1.MultiClusterHubSpec{ + ImagePullSecret: "image", + }, + } +} diff --git a/vendor/github.com/stolostron/multiclusterhub-operator/LICENSE b/vendor/github.com/stolostron/multiclusterhub-operator/LICENSE new file mode 100644 index 000000000..f49a4e16e --- /dev/null +++ b/vendor/github.com/stolostron/multiclusterhub-operator/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
\ No newline at end of file diff --git a/vendor/github.com/stolostron/multiclusterhub-operator/api/v1/groupversion_info.go b/vendor/github.com/stolostron/multiclusterhub-operator/api/v1/groupversion_info.go new file mode 100644 index 000000000..276b9e412 --- /dev/null +++ b/vendor/github.com/stolostron/multiclusterhub-operator/api/v1/groupversion_info.go @@ -0,0 +1,38 @@ +// Copyright Contributors to the Open Cluster Management project + +/* +Copyright 2021. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1 contains API Schema definitions for the operator v1 API group +//+kubebuilder:object:generate=true +//+groupName=operator.open-cluster-management.io +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "operator.open-cluster-management.io", Version: "v1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/vendor/github.com/stolostron/multiclusterhub-operator/api/v1/multiclusterhub_methods.go b/vendor/github.com/stolostron/multiclusterhub-operator/api/v1/multiclusterhub_methods.go new file mode 100644 index 000000000..70af18960 --- /dev/null +++ b/vendor/github.com/stolostron/multiclusterhub-operator/api/v1/multiclusterhub_methods.go @@ -0,0 +1,147 @@ +package v1 + +const ( + Search string = "search" + ManagementIngress string = "management-ingress" + Console string = "console" + Insights string = "insights" + GRC string = "grc" + ClusterLifecycle string = "cluster-lifecycle" + ClusterBackup string = "cluster-backup" + ClusterProxyAddon string = "cluster-proxy-addon" + Repo string = "multiclusterhub-repo" + MultiClusterEngine string = "multicluster-engine" + Volsync string = "volsync" + + // MCE + MCEManagedServiceAccount string = "managedserviceaccount-preview" + MCEConsole string = "console-mce" + MCEDiscovery string = "discovery" + MCEHive string = "hive" + MCEAssistedService string = "assisted-service" + MCEClusterLifecycle string = "cluster-lifecycle-mce" + MCEClusterManager string = "cluster-manager" + MCEServerFoundation string = "server-foundation" + MCEHypershift string = "hypershift-preview" +) + +var allComponents = []string{ + // MCH + Repo, + Search, + ManagementIngress, + Console, + Insights, + GRC, + ClusterLifecycle, + ClusterBackup, + ClusterProxyAddon, + Volsync, + MultiClusterEngine, + // MCE + MCEAssistedService, + MCEClusterLifecycle, + MCEClusterManager, + MCEDiscovery, + MCEHive, + MCEServerFoundation, + MCEConsole, + MCEManagedServiceAccount, + MCEHypershift, +} + +var MCEComponents = []string{ + MCEAssistedService, + MCEClusterLifecycle, + MCEClusterManager, + MCEDiscovery, + MCEHive, + MCEServerFoundation, + MCEConsole, + MCEManagedServiceAccount, + MCEHypershift, +} + 
+var DefaultEnabledComponents = []string{ + Repo, + Search, + ManagementIngress, + Console, + Insights, + GRC, + ClusterLifecycle, + Volsync, + MultiClusterEngine, +} + +var DefaultDisabledComponents = []string{ + ClusterProxyAddon, + ClusterBackup, +} + +func (mch *MultiClusterHub) ComponentPresent(s string) bool { + if mch.Spec.Overrides == nil { + return false + } + for _, c := range mch.Spec.Overrides.Components { + if c.Name == s { + return true + } + } + return false +} + +func (mch *MultiClusterHub) Enabled(s string) bool { + if mch.Spec.Overrides == nil { + return false + } + for _, c := range mch.Spec.Overrides.Components { + if c.Name == s { + return c.Enabled + } + } + + return false +} + +func (mch *MultiClusterHub) Enable(s string) { + if mch.Spec.Overrides == nil { + mch.Spec.Overrides = &Overrides{} + } + for i, c := range mch.Spec.Overrides.Components { + if c.Name == s { + mch.Spec.Overrides.Components[i].Enabled = true + return + } + } + mch.Spec.Overrides.Components = append(mch.Spec.Overrides.Components, ComponentConfig{ + Name: s, + Enabled: true, + }) +} + +func (mch *MultiClusterHub) Disable(s string) { + if mch.Spec.Overrides == nil { + mch.Spec.Overrides = &Overrides{} + } + for i, c := range mch.Spec.Overrides.Components { + if c.Name == s { + mch.Spec.Overrides.Components[i].Enabled = false + return + } + } + mch.Spec.Overrides.Components = append(mch.Spec.Overrides.Components, ComponentConfig{ + Name: s, + Enabled: false, + }) +} + +// a component is valid if its name matches a known component +func ValidComponent(c ComponentConfig) bool { + for _, name := range allComponents { + if c.Name == name { + return true + } + } + return false +} diff --git a/vendor/github.com/stolostron/multiclusterhub-operator/api/v1/multiclusterhub_types.go b/vendor/github.com/stolostron/multiclusterhub-operator/api/v1/multiclusterhub_types.go new file mode 100644 index 000000000..14a4e3d94 --- /dev/null +++ b/vendor/github.com/stolostron/multiclusterhub-operator/api/v1/multiclusterhub_types.go @@ -0,0 +1,348 @@ +// Copyright Contributors to the Open Cluster Management project + +/* +Copyright 2021. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// AvailabilityType ... +type AvailabilityType string + +const ( + // HABasic stands up most app subscriptions with a replicaCount of 1 + HABasic AvailabilityType = "Basic" + // HAHigh stands up most app subscriptions with a replicaCount of 2 + HAHigh AvailabilityType = "High" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
+ +// MultiClusterHubSpec defines the desired state of MultiClusterHub +type MultiClusterHubSpec struct { + + // Override pull secret for accessing MultiClusterHub operand and endpoint images + //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Image Pull Secret",xDescriptors={"urn:alm:descriptor:io.kubernetes:Secret","urn:alm:descriptor:com.tectonic.ui:advanced"} + ImagePullSecret string `json:"imagePullSecret,omitempty"` + + // Specifies deployment replication for improved availability. Options are: Basic and High (default) + //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Availability Configuration",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:advanced","urn:alm:descriptor:com.tectonic.ui:select:High","urn:alm:descriptor:com.tectonic.ui:select:Basic"} + AvailabilityConfig AvailabilityType `json:"availabilityConfig,omitempty"` + + // (Deprecated) Install cert-manager into its own namespace + //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Separate Certificate Management",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:hidden"} + // +optional + SeparateCertificateManagement bool `json:"separateCertificateManagement"` + + // Set the nodeselectors + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + + // Tolerations causes all components to tolerate any taints. + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` + + // (Deprecated) Overrides for the default HiveConfig spec + //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Hive Config",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:hidden"} + Hive *HiveConfigSpec `json:"hive,omitempty"` + + // Configuration options for ingress management + //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Ingress Management",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:advanced"} + Ingress IngressSpec `json:"ingress,omitempty"` + + // Developer Overrides + //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Developer Overrides",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:hidden"} + Overrides *Overrides `json:"overrides,omitempty"` + + // Provide the customized OpenShift default ingress CA certificate to RHACM + //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Custom CA Configmap",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:advanced","urn:alm:descriptor:io.kubernetes:ConfigMap"} + CustomCAConfigmap string `json:"customCAConfigmap,omitempty"` + + // Disable automatic import of the hub cluster as a managed cluster + //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Disable Hub Self Management",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:advanced","urn:alm:descriptor:com.tectonic.ui:booleanSwitch"} + DisableHubSelfManagement bool `json:"disableHubSelfManagement,omitempty"` + + // Disable automatic update of ClusterImageSets + //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Disable Update ClusterImageSets",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:advanced","urn:alm:descriptor:com.tectonic.ui:booleanSwitch"} + DisableUpdateClusterImageSets bool `json:"disableUpdateClusterImageSets,omitempty"` + + // (Deprecated) Enable cluster proxy addon + //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Enable Cluster Proxy Addon",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:hidden"} + EnableClusterProxyAddon bool `json:"enableClusterProxyAddon,omitempty"` + + // (Deprecated) Enable cluster backup + 
//+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Enable Cluster Backup",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:hidden"} + // +optional + EnableClusterBackup bool `json:"enableClusterBackup"` +} + +// Overrides provides developer overrides for MCH installation +type Overrides struct { + // Pull policy of the MultiCluster hub images + ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + + // Provides optional configuration for components + //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Component Configuration",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:hidden"} + // +optional + Components []ComponentConfig `json:"components,omitempty"` +} + +// ComponentConfig provides optional configuration items for individual components +type ComponentConfig struct { + Name string `json:"name"` + Enabled bool `json:"enabled"` +} + +type HiveConfigSpec struct { + + // (Deprecated) ExternalDNS specifies configuration for external-dns if it is to be deployed by + // Hive. If absent, external-dns will not be deployed. + ExternalDNS *ExternalDNSConfig `json:"externalDNS,omitempty"` + + // (Deprecated) AdditionalCertificateAuthorities is a list of references to secrets in the + // 'hive' namespace that contain an additional Certificate Authority to use when communicating + // with target clusters. These certificate authorities will be used in addition to any self-signed + // CA generated by each cluster on installation. + AdditionalCertificateAuthorities []corev1.LocalObjectReference `json:"additionalCertificateAuthorities,omitempty"` + + // (Deprecated) GlobalPullSecret is used to specify a pull secret that will be used globally by all of the cluster deployments. + // For each cluster deployment, the contents of GlobalPullSecret will be merged with the specific pull secret for + // a cluster deployment(if specified), with precedence given to the contents of the pull secret for the cluster deployment. + GlobalPullSecret *corev1.LocalObjectReference `json:"globalPullSecret,omitempty"` + + // (Deprecated) Backup specifies configuration for backup integration. + // If absent, backup integration will be disabled. + Backup BackupConfig `json:"backup,omitempty"` + + // (Deprecated) FailedProvisionConfig is used to configure settings related to handling provision failures. + FailedProvisionConfig FailedProvisionConfig `json:"failedProvisionConfig"` + + // (Deprecated) MaintenanceMode can be set to true to disable the hive controllers in situations where we need to ensure + // nothing is running that will add or act upon finalizers on Hive types. This should rarely be needed. + // Sets replicas to 0 for the hive-controllers deployment to accomplish this. + MaintenanceMode *bool `json:"maintenanceMode,omitempty"` +} + +// HiveConfigStatus defines the observed state of Hive +type HiveConfigStatus struct { + // (Deprecated) AggregatorClientCAHash keeps an md5 hash of the aggregator client CA + // configmap data from the openshift-config-managed namespace. When the configmap changes, + // admission is redeployed. + AggregatorClientCAHash string `json:"aggregatorClientCAHash,omitempty"` +} + +// BackupConfig contains settings for the Velero backup integration. +type BackupConfig struct { + // (Deprecated) Velero specifies configuration for the Velero backup integration. 
+ // +optional + Velero VeleroBackupConfig `json:"velero,omitempty"` + + // (Deprecated) MinBackupPeriodSeconds specifies that a minimum of MinBackupPeriodSeconds will occur in between each backup. + // This is used to rate limit backups. This potentially batches together multiple changes into 1 backup. + // No backups will be lost as changes that happen during this interval are queued up and will result in a + // backup happening once the interval has been completed. + MinBackupPeriodSeconds *int `json:"minBackupPeriodSeconds,omitempty"` +} + +// VeleroBackupConfig contains settings for the Velero backup integration. +type VeleroBackupConfig struct { + // (Deprecated) Enabled dictates if Velero backup integration is enabled. + // If not specified, the default is disabled. + Enabled bool `json:"enabled,omitempty"` +} + +// FailedProvisionConfig contains settings to control behavior undertaken by Hive when an installation attempt fails. +type FailedProvisionConfig struct { + + // (Deprecated) SkipGatherLogs disables functionality that attempts to gather full logs from the cluster if an installation + // fails for any reason. The logs will be stored in a persistent volume for up to 7 days. + SkipGatherLogs bool `json:"skipGatherLogs,omitempty"` +} + +// ExternalDNSConfig contains settings for running external-dns in a Hive +// environment. +type ExternalDNSConfig struct { + + // (Deprecated) AWS contains AWS-specific settings for external DNS + AWS *ExternalDNSAWSConfig `json:"aws,omitempty"` + + // (Deprecated) GCP contains GCP-specific settings for external DNS + GCP *ExternalDNSGCPConfig `json:"gcp,omitempty"` + + // As other cloud providers are supported, additional fields will be + // added for each of those cloud providers. Only a single cloud provider + // may be configured at a time. +} + +// ExternalDNSAWSConfig contains AWS-specific settings for external DNS +type ExternalDNSAWSConfig struct { + // (Deprecated) Credentials references a secret that will be used to authenticate with + // AWS Route53. It will need permission to manage entries in each of the + // managed domains for this cluster. + // Secret should have AWS keys named 'aws_access_key_id' and 'aws_secret_access_key'. + Credentials corev1.LocalObjectReference `json:"credentials,omitempty"` +} + +// ExternalDNSGCPConfig contains GCP-specific settings for external DNS +type ExternalDNSGCPConfig struct { + // (Deprecated) Credentials references a secret that will be used to authenticate with + // GCP DNS. It will need permission to manage entries in each of the + // managed domains for this cluster. + // Secret should have a key names 'osServiceAccount.json'. + // The credentials must specify the project to use. + Credentials corev1.LocalObjectReference `json:"credentials,omitempty"` +} + +// IngressSpec specifies configuration options for ingress management +type IngressSpec struct { + // List of SSL ciphers enabled for management ingress. 
Defaults to full list of supported ciphers + SSLCiphers []string `json:"sslCiphers,omitempty"` +} + +type HubPhaseType string + +const ( + HubPending HubPhaseType = "Pending" + HubRunning HubPhaseType = "Running" + HubInstalling HubPhaseType = "Installing" + HubUpdating HubPhaseType = "Updating" + HubUninstalling HubPhaseType = "Uninstalling" + HubUpdatingBlocked HubPhaseType = "UpdatingBlocked" +) + +// MultiClusterHubStatus defines the observed state of MultiClusterHub +type MultiClusterHubStatus struct { + + // Represents the running phase of the MultiClusterHub + // +optional + Phase HubPhaseType `json:"phase"` + + // CurrentVersion indicates the current version + CurrentVersion string `json:"currentVersion,omitempty"` + + // DesiredVersion indicates the desired version + DesiredVersion string `json:"desiredVersion,omitempty"` + + // Conditions contains the different condition statuses for the MultiClusterHub + HubConditions []HubCondition `json:"conditions,omitempty"` + + // Components []ComponentCondition `json:"manifests,omitempty"` + Components map[string]StatusCondition `json:"components,omitempty"` +} + +// StatusCondition contains condition information. +type StatusCondition struct { + // The resource kind this condition represents + Kind string `json:"-"` + + // Available indicates whether this component is considered properly running + Available bool `json:"-"` + + // Type is the type of the cluster condition. + // +required + Type string `json:"type,omitempty"` + + // Status is the status of the condition. One of True, False, Unknown. + // +required + Status metav1.ConditionStatus `json:"status,omitempty"` + + // The last time this condition was updated. + LastUpdateTime metav1.Time `json:"-"` + + // LastTransitionTime is the last time the condition changed from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + + // Reason is a (brief) reason for the condition's last status change. + // +required + Reason string `json:"reason,omitempty"` + + // Message is a human-readable message indicating details about the last status change. + // +required + Message string `json:"message,omitempty"` +} + +type HubConditionType string + +const ( + // Progressing means the deployment is progressing. + Progressing HubConditionType = "Progressing" + + // Complete means that all desired components are configured and in a running state. + Complete HubConditionType = "Complete" + + // Terminating means that the multiclusterhub has been deleted and is cleaning up. + Terminating HubConditionType = "Terminating" + + // Bocked means there is something preventing an update from occurring + Blocked HubConditionType = "Blocked" +) + +// StatusCondition contains condition information. +type HubCondition struct { + // Type is the type of the cluster condition. + // +required + Type HubConditionType `json:"type,omitempty"` + + // Status is the status of the condition. One of True, False, Unknown. + // +required + Status metav1.ConditionStatus `json:"status,omitempty"` + + // The last time this condition was updated. + LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"` + + // LastTransitionTime is the last time the condition changed from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + + // Reason is a (brief) reason for the condition's last status change. + // +required + Reason string `json:"reason,omitempty"` + + // Message is a human-readable message indicating details about the last status change. 
+ // +required + Message string `json:"message,omitempty"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +//+kubebuilder:resource:path=multiclusterhubs,scope=Namespaced,shortName=mch + +// MultiClusterHub defines the configuration for an instance of the MultiCluster Hub +//+kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase",description="The overall status of the multiclusterhub" +//+kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +//+operator-sdk:csv:customresourcedefinitions:displayName="MultiClusterHub" +type MultiClusterHub struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec MultiClusterHubSpec `json:"spec,omitempty"` + Status MultiClusterHubStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// MultiClusterHubList contains a list of MultiClusterHub +type MultiClusterHubList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MultiClusterHub `json:"items"` +} + +func init() { + SchemeBuilder.Register(&MultiClusterHub{}, &MultiClusterHubList{}) +} diff --git a/vendor/github.com/stolostron/multiclusterhub-operator/api/v1/multiclusterhub_webhook.go b/vendor/github.com/stolostron/multiclusterhub-operator/api/v1/multiclusterhub_webhook.go new file mode 100644 index 000000000..b46052679 --- /dev/null +++ b/vendor/github.com/stolostron/multiclusterhub-operator/api/v1/multiclusterhub_webhook.go @@ -0,0 +1,84 @@ +// Copyright Contributors to the Open Cluster Management project + +/* +Copyright 2021. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + ctrl "sigs.k8s.io/controller-runtime" + cl "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +// log is for logging in this package. +var ( + multiclusterhublog = logf.Log.WithName("multiclusterhub-resource") + Client cl.Client +) + +// TODO: Get Webhook Working ... +func (r *MultiClusterHub) SetupWebhookWithManager(mgr ctrl.Manager) error { + Client = mgr.GetClient() + return ctrl.NewWebhookManagedBy(mgr). + For(r).Complete() +} + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +/* +var _ webhook.Defaulter = &MultiClusterHub{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *MultiClusterHub) Default() { + multiclusterhublog.Info("default", "name", r.Name) + + // TODO(user): fill in your defaulting logic. +} + +// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. 
+//+kubebuilder:webhook:name=multiclusterhub-operator-validating-webhook,path=/validate-v1-multiclusterhub,mutating=false,failurePolicy=fail,sideEffects=None,groups=operator.open-cluster-management.io,resources=multiclusterhubs,verbs=create;update;delete,versions=v1,name=multiclusterhub.validating-webhook.open-cluster-management.io,admissionReviewVersions={v1,v1beta1}
+
+var _ webhook.Validator = &MultiClusterHub{}
+
+// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
+func (r *MultiClusterHub) ValidateCreate() error {
+	multiclusterhublog.Info("validate create", "name", r.Name)
+	// TODO(user): fill in your validation logic upon object creation.
+	multiClusterHubList := &MultiClusterHubList{}
+	if err := Client.List(context.TODO(), multiClusterHubList); err != nil {
+		return fmt.Errorf("unable to list MultiClusterHubs: %s", err)
+	}
+	if len(multiClusterHubList.Items) == 0 {
+		return nil
+	}
+	return fmt.Errorf("the MultiClusterHub CR already exists")
+}
+
+// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
+func (r *MultiClusterHub) ValidateUpdate(old runtime.Object) error {
+	multiclusterhublog.Info("validate update", "name", r.Name)
+	// TODO(user): fill in your validation logic upon object update.
+	return nil
+}
+
+// ValidateDelete implements webhook.Validator so a webhook will be registered for the type
+func (r *MultiClusterHub) ValidateDelete() error {
+	multiclusterhublog.Info("validate delete", "name", r.Name)
+
+	// TODO(user): fill in your validation logic upon object deletion.
+	return nil
+}
+*/
diff --git a/vendor/github.com/stolostron/multiclusterhub-operator/api/v1/zz_generated.deepcopy.go b/vendor/github.com/stolostron/multiclusterhub-operator/api/v1/zz_generated.deepcopy.go
new file mode 100644
index 000000000..9e6c230f9
--- /dev/null
+++ b/vendor/github.com/stolostron/multiclusterhub-operator/api/v1/zz_generated.deepcopy.go
@@ -0,0 +1,404 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright 2021.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupConfig) DeepCopyInto(out *BackupConfig) {
+	*out = *in
+	out.Velero = in.Velero
+	if in.MinBackupPeriodSeconds != nil {
+		in, out := &in.MinBackupPeriodSeconds, &out.MinBackupPeriodSeconds
+		*out = new(int)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupConfig.
+func (in *BackupConfig) DeepCopy() *BackupConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(BackupConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ComponentConfig) DeepCopyInto(out *ComponentConfig) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentConfig.
+func (in *ComponentConfig) DeepCopy() *ComponentConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(ComponentConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExternalDNSAWSConfig) DeepCopyInto(out *ExternalDNSAWSConfig) {
+	*out = *in
+	out.Credentials = in.Credentials
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalDNSAWSConfig.
+func (in *ExternalDNSAWSConfig) DeepCopy() *ExternalDNSAWSConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(ExternalDNSAWSConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExternalDNSConfig) DeepCopyInto(out *ExternalDNSConfig) {
+	*out = *in
+	if in.AWS != nil {
+		in, out := &in.AWS, &out.AWS
+		*out = new(ExternalDNSAWSConfig)
+		**out = **in
+	}
+	if in.GCP != nil {
+		in, out := &in.GCP, &out.GCP
+		*out = new(ExternalDNSGCPConfig)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalDNSConfig.
+func (in *ExternalDNSConfig) DeepCopy() *ExternalDNSConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(ExternalDNSConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExternalDNSGCPConfig) DeepCopyInto(out *ExternalDNSGCPConfig) {
+	*out = *in
+	out.Credentials = in.Credentials
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalDNSGCPConfig.
+func (in *ExternalDNSGCPConfig) DeepCopy() *ExternalDNSGCPConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(ExternalDNSGCPConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FailedProvisionConfig) DeepCopyInto(out *FailedProvisionConfig) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailedProvisionConfig.
+func (in *FailedProvisionConfig) DeepCopy() *FailedProvisionConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(FailedProvisionConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HiveConfigSpec) DeepCopyInto(out *HiveConfigSpec) {
+	*out = *in
+	if in.ExternalDNS != nil {
+		in, out := &in.ExternalDNS, &out.ExternalDNS
+		*out = new(ExternalDNSConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.AdditionalCertificateAuthorities != nil {
+		in, out := &in.AdditionalCertificateAuthorities, &out.AdditionalCertificateAuthorities
+		*out = make([]corev1.LocalObjectReference, len(*in))
+		copy(*out, *in)
+	}
+	if in.GlobalPullSecret != nil {
+		in, out := &in.GlobalPullSecret, &out.GlobalPullSecret
+		*out = new(corev1.LocalObjectReference)
+		**out = **in
+	}
+	in.Backup.DeepCopyInto(&out.Backup)
+	out.FailedProvisionConfig = in.FailedProvisionConfig
+	if in.MaintenanceMode != nil {
+		in, out := &in.MaintenanceMode, &out.MaintenanceMode
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HiveConfigSpec.
+func (in *HiveConfigSpec) DeepCopy() *HiveConfigSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(HiveConfigSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HiveConfigStatus) DeepCopyInto(out *HiveConfigStatus) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HiveConfigStatus.
+func (in *HiveConfigStatus) DeepCopy() *HiveConfigStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(HiveConfigStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HubCondition) DeepCopyInto(out *HubCondition) {
+	*out = *in
+	in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HubCondition.
+func (in *HubCondition) DeepCopy() *HubCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(HubCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressSpec) DeepCopyInto(out *IngressSpec) {
+	*out = *in
+	if in.SSLCiphers != nil {
+		in, out := &in.SSLCiphers, &out.SSLCiphers
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressSpec.
+func (in *IngressSpec) DeepCopy() *IngressSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(IngressSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MultiClusterHub) DeepCopyInto(out *MultiClusterHub) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiClusterHub.
+func (in *MultiClusterHub) DeepCopy() *MultiClusterHub {
+	if in == nil {
+		return nil
+	}
+	out := new(MultiClusterHub)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MultiClusterHub) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MultiClusterHubList) DeepCopyInto(out *MultiClusterHubList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]MultiClusterHub, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiClusterHubList.
+func (in *MultiClusterHubList) DeepCopy() *MultiClusterHubList {
+	if in == nil {
+		return nil
+	}
+	out := new(MultiClusterHubList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MultiClusterHubList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MultiClusterHubSpec) DeepCopyInto(out *MultiClusterHubSpec) {
+	*out = *in
+	if in.NodeSelector != nil {
+		in, out := &in.NodeSelector, &out.NodeSelector
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Tolerations != nil {
+		in, out := &in.Tolerations, &out.Tolerations
+		*out = make([]corev1.Toleration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Hive != nil {
+		in, out := &in.Hive, &out.Hive
+		*out = new(HiveConfigSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	in.Ingress.DeepCopyInto(&out.Ingress)
+	if in.Overrides != nil {
+		in, out := &in.Overrides, &out.Overrides
+		*out = new(Overrides)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiClusterHubSpec.
+func (in *MultiClusterHubSpec) DeepCopy() *MultiClusterHubSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(MultiClusterHubSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MultiClusterHubStatus) DeepCopyInto(out *MultiClusterHubStatus) {
+	*out = *in
+	if in.HubConditions != nil {
+		in, out := &in.HubConditions, &out.HubConditions
+		*out = make([]HubCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Components != nil {
+		in, out := &in.Components, &out.Components
+		*out = make(map[string]StatusCondition, len(*in))
+		for key, val := range *in {
+			(*out)[key] = *val.DeepCopy()
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiClusterHubStatus.
+func (in *MultiClusterHubStatus) DeepCopy() *MultiClusterHubStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(MultiClusterHubStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Overrides) DeepCopyInto(out *Overrides) {
+	*out = *in
+	if in.Components != nil {
+		in, out := &in.Components, &out.Components
+		*out = make([]ComponentConfig, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Overrides.
+func (in *Overrides) DeepCopy() *Overrides {
+	if in == nil {
+		return nil
+	}
+	out := new(Overrides)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatusCondition) DeepCopyInto(out *StatusCondition) {
+	*out = *in
+	in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatusCondition.
+func (in *StatusCondition) DeepCopy() *StatusCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(StatusCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VeleroBackupConfig) DeepCopyInto(out *VeleroBackupConfig) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VeleroBackupConfig.
+func (in *VeleroBackupConfig) DeepCopy() *VeleroBackupConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(VeleroBackupConfig)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 48290bc75..1473a363d 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -50,6 +50,12 @@ github.com/blang/semver
 # github.com/blang/semver/v4 v4.0.0
 ## explicit; go 1.14
 github.com/blang/semver/v4
+# github.com/bshuster-repo/logrus-logstash-hook v1.0.2
+## explicit; go 1.16
+# github.com/bugsnag/bugsnag-go v2.1.2+incompatible
+## explicit
+# github.com/bugsnag/panicwrap v1.3.4
+## explicit
 # github.com/cenkalti/backoff/v3 v3.2.2
 ## explicit; go 1.12
 github.com/cenkalti/backoff/v3
@@ -230,6 +236,8 @@ github.com/go-openapi/validate
 github.com/go-task/slim-sprig
 # github.com/go-test/deep v1.1.0
 ## explicit; go 1.16
+# github.com/gofrs/uuid v4.2.0+incompatible
+## explicit
 # github.com/gogo/protobuf v1.3.2
 ## explicit; go 1.15
 github.com/gogo/protobuf/proto
@@ -842,6 +850,9 @@ github.com/stmcginnis/gofish/swordfish
 # github.com/stoewer/go-strcase v1.2.0
 ## explicit; go 1.11
 github.com/stoewer/go-strcase
+# github.com/stolostron/multiclusterhub-operator v0.0.0-20220516144733-74b7bd46ac55
+## explicit; go 1.17
+github.com/stolostron/multiclusterhub-operator/api/v1
 # github.com/stretchr/testify v1.9.0
 ## explicit; go 1.17
 github.com/stretchr/testify/assert