Added a log collector implementation for Metal3
It only collects qemu serial logs for now. The integration test has also been reduced to a more basic version.
1 parent d4629af · commit 4993b23
Showing 3 changed files with 86 additions and 65 deletions.
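The diff below covers only the integration test; the collector itself lives in one of the other changed files, which is not included in this excerpt. For orientation, a minimal collector for qemu serial consoles, written against the cluster-api test framework's ClusterLogCollector interface, could look like the following sketch. The type name Metal3LogCollector, the serial-log path, and the sudo cp approach are illustrative assumptions, not the commit's actual code; only the CollectMachineLog signature comes from the cluster-api test framework.

package e2e

import (
	"context"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// Metal3LogCollector is a hypothetical ClusterLogCollector implementation
// for a metal3-dev-env setup, where every Machine is backed by a local
// libvirt/qemu VM.
type Metal3LogCollector struct{}

// CollectMachineLog copies the qemu serial console log of the VM backing
// the given Machine into outputPath.
func (Metal3LogCollector) CollectMachineLog(ctx context.Context, cli client.Client, m *clusterv1.Machine, outputPath string) error {
	if m.Status.NodeRef == nil {
		return fmt.Errorf("machine %q has no NodeRef, cannot map it to a VM", m.Name)
	}
	// Assumption: the VM name matches the node name and libvirt writes the
	// serial console to this path (the metal3-dev-env default layout).
	serialLog := fmt.Sprintf("/var/log/libvirt/qemu/%s-serial0.log", m.Status.NodeRef.Name)

	if err := os.MkdirAll(outputPath, 0o750); err != nil {
		return fmt.Errorf("couldn't create output folder %q: %w", outputPath, err)
	}
	// The libvirt log is root-owned, so copy it with sudo rather than
	// reading it directly.
	cmd := exec.CommandContext(ctx, "sudo", "cp", serialLog, filepath.Join(outputPath, "serial.log"))
	if out, err := cmd.CombinedOutput(); err != nil {
		return fmt.Errorf("couldn't copy %q: %s: %w", serialLog, out, err)
	}
	return nil
}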
@@ -1,79 +1,34 @@
package e2e

import (
	"os"
	"path/filepath"
	"strings"

	bmov1alpha1 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
	. "github.com/onsi/ginkgo/v2"
	"sigs.k8s.io/cluster-api/test/framework"
	"sigs.k8s.io/controller-runtime/pkg/client"
	. "github.com/onsi/gomega"
)

var _ = Describe("When testing integration [integration]", func() {
	It("CI Test Provision", func() {
	BeforeEach(func() {
		osType := strings.ToLower(os.Getenv("OS"))
		Expect(osType).ToNot(Equal(""))
		validateGlobals(specName)

		// We need to override the clusterctl apply log folder to avoid exposing our credentials.
		clusterctlLogFolder = filepath.Join(os.TempDir(), "clusters", bootstrapClusterProxy.GetName())
	})

	It("Should create a workload cluster", func() {
		By("Fetching cluster configuration")
		numberOfWorkers = int(*e2eConfig.GetInt32PtrVariable("WORKER_MACHINE_COUNT"))
		numberOfControlplane = int(*e2eConfig.GetInt32PtrVariable("CONTROL_PLANE_MACHINE_COUNT"))
		k8sVersion := e2eConfig.GetVariable("KUBERNETES_VERSION")
		By("Provision Workload cluster")
		targetCluster, result := createTargetCluster(k8sVersion)
		By("Pivot objects to target cluster")
		pivoting(ctx, func() PivotingInput {
			return PivotingInput{
				E2EConfig:             e2eConfig,
				BootstrapClusterProxy: bootstrapClusterProxy,
				TargetCluster:         targetCluster,
				SpecName:              specName,
				ClusterName:           clusterName,
				Namespace:             namespace,
				ArtifactFolder:        artifactFolder,
				ClusterctlConfigPath:  clusterctlConfigPath,
			}
		})
		By("Fetch the target cluster resources before re-pivoting")
		framework.DumpAllResources(ctx, framework.DumpAllResourcesInput{
			Lister:    targetCluster.GetClient(),
			Namespace: namespace,
			LogPath:   filepath.Join(artifactFolder, "clusters", clusterName, "resources"),
		})
		By("Repivot objects to the source cluster")
		rePivoting(ctx, func() RePivotingInput {
			return RePivotingInput{
				E2EConfig:             e2eConfig,
				BootstrapClusterProxy: bootstrapClusterProxy,
				TargetCluster:         targetCluster,
				SpecName:              specName,
				ClusterName:           clusterName,
				Namespace:             namespace,
				ArtifactFolder:        artifactFolder,
				ClusterctlConfigPath:  clusterctlConfigPath,
			}
		})
		By("Deprovision target cluster")
		bootstrapClient := bootstrapClusterProxy.GetClient()
		intervals := e2eConfig.GetIntervals(specName, "wait-deprovision-cluster")
		// In the pivoting step we labeled the BMO CRDs (so that the objects are
		// moved by the CAPI pivoting feature), which makes CAPI's
		// DeleteClusterAndWait() fail, since it checks that all resources managed
		// by CAPI are gone after the Cluster deletion. Therefore we do not use
		// DeleteClusterAndWait(), but instead delete the Cluster and then wait
		// for it to be deleted.
		framework.DeleteCluster(ctx, framework.DeleteClusterInput{
			Deleter: bootstrapClient,
			Cluster: result.Cluster,
		})
		Logf("Waiting for the Cluster object to be deleted")
		framework.WaitForClusterDeleted(ctx, framework.WaitForClusterDeletedInput{
			Getter:  bootstrapClient,
			Cluster: result.Cluster,
		}, intervals...)
		numberOfAvailableBMHs := numberOfWorkers + numberOfControlplane
		intervals = e2eConfig.GetIntervals(specName, "wait-bmh-deprovisioning-available")
		WaitForNumBmhInState(ctx, bmov1alpha1.StateAvailable, WaitForNumInput{
			Client:    bootstrapClient,
			Options:   []client.ListOption{client.InNamespace(namespace)},
			Replicas:  numberOfAvailableBMHs,
			Intervals: intervals,
		})
		targetCluster, _ = createTargetCluster(k8sVersion)
	})

	AfterEach(func() {
		DumpSpecResourcesAndCleanup(ctx, specName, bootstrapClusterProxy, artifactFolder, namespace, e2eConfig.GetIntervals, clusterName, clusterctlLogFolder, skipCleanup)
	})
})
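For completeness, a hedged sketch of how such a collector would typically be hooked up and triggered. Metal3LogCollector refers to the sketch above; framework.NewClusterProxy, framework.WithMachineLogCollector, and ClusterProxy.CollectWorkloadClusterLogs are existing cluster-api test framework calls, but whether this commit's suite wires them exactly this way is an assumption (kubeconfigPath and initScheme are likewise assumed suite helpers).

// In the suite setup: register the collector on the cluster proxy so the
// framework knows how to fetch per-machine logs.
bootstrapClusterProxy := framework.NewClusterProxy("bootstrap", kubeconfigPath, initScheme(),
	framework.WithMachineLogCollector(Metal3LogCollector{}))

// During cleanup: collect logs for every Machine of the workload cluster;
// each Machine results in one Metal3LogCollector.CollectMachineLog call.
bootstrapClusterProxy.CollectWorkloadClusterLogs(ctx, namespace, clusterName,
	filepath.Join(artifactFolder, "clusters", clusterName, "machines"))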