Commit c0af60f
Edit integration test to call DumpSpecResourcesAndCleanup and collect logs

Co-authored-by: huutomerkki <[email protected]>
mboukhalfa and huutomerkki committed Jul 21, 2023
1 parent e82d83e
Showing 1 changed file with 6 additions and 29 deletions: test/e2e/integration_test.go
```diff
@@ -3,19 +3,18 @@ package e2e
 import (
 	"path/filepath"
 
-	bmov1alpha1 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
 	. "github.com/onsi/ginkgo/v2"
-	"sigs.k8s.io/cluster-api/test/framework"
-	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
 var _ = Describe("When testing integration [integration]", func() {
 
 	It("CI Test Provision", func() {
 		numberOfWorkers = int(*e2eConfig.GetInt32PtrVariable("WORKER_MACHINE_COUNT"))
 		numberOfControlplane = int(*e2eConfig.GetInt32PtrVariable("CONTROL_PLANE_MACHINE_COUNT"))
 		k8sVersion := e2eConfig.GetVariable("KUBERNETES_VERSION")
 		By("Provision Workload cluster")
-		targetCluster, result := createTargetCluster(k8sVersion)
+		targetCluster, _ = createTargetCluster(k8sVersion)
 		By("Pivot objects to target cluster")
 		pivoting(ctx, func() PivotingInput {
 			return PivotingInput{
@@ -49,31 +48,9 @@
 				ClusterctlConfigPath: clusterctlConfigPath,
 			}
 		})
-		By("Deprovision target cluster")
-		bootstrapClient := bootstrapClusterProxy.GetClient()
-		intervals := e2eConfig.GetIntervals(specName, "wait-deprovision-cluster")
-		// In the pivoting step we labeled the BMO CRDs (so that the objects are
-		// moved by the CAPI pivoting feature), which made CAPI's
-		// DeleteClusterAndWait() fail, as it checks that all resources managed
-		// by CAPI are gone after Cluster deletion. Therefore, we opted not to
-		// use DeleteClusterAndWait(), but to only delete the cluster and then
-		// wait for it to be deleted.
-		framework.DeleteCluster(ctx, framework.DeleteClusterInput{
-			Deleter: bootstrapClient,
-			Cluster: result.Cluster,
-		})
-		Logf("Waiting for the Cluster object to be deleted")
-		framework.WaitForClusterDeleted(ctx, framework.WaitForClusterDeletedInput{
-			Getter:  bootstrapClient,
-			Cluster: result.Cluster,
-		}, intervals...)
-		numberOfAvailableBMHs := numberOfWorkers + numberOfControlplane
-		intervals = e2eConfig.GetIntervals(specName, "wait-bmh-deprovisioning-available")
-		WaitForNumBmhInState(ctx, bmov1alpha1.StateAvailable, WaitForNumInput{
-			Client:    bootstrapClient,
-			Options:   []client.ListOption{client.InNamespace(namespace)},
-			Replicas:  numberOfAvailableBMHs,
-			Intervals: intervals,
-		})
 	})
 
+	AfterEach(func() {
+		DumpSpecResourcesAndCleanup(ctx, specName, bootstrapClusterProxy, artifactFolder, namespace, e2eConfig.GetIntervals, clusterName, clusterctlLogFolder, skipCleanup)
+	})
 })
```
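The new `AfterEach` hands log collection and teardown to `DumpSpecResourcesAndCleanup`, replacing the inline deprovision block removed above; cleanup now runs even when the spec fails partway through. The helper's body is not part of this diff, so the following is only a minimal sketch of what such a helper typically does, modeled on the upstream CAPI e2e pattern. The function name, signature, and behavior here are illustrative, and the metal3 helper (which also takes `clusterName` and `clusterctlLogFolder`) may differ:

```go
package e2e

import (
	"context"
	"path/filepath"

	"sigs.k8s.io/cluster-api/test/framework"
)

// dumpSpecResourcesAndCleanup is an illustrative sketch modeled on the
// upstream CAPI e2e helper. The real metal3 helper (which also collects
// clusterctl logs) may differ.
func dumpSpecResourcesAndCleanup(ctx context.Context, specName string, proxy framework.ClusterProxy, artifactFolder, namespace string, intervalsGetter func(spec, key string) []interface{}, skipCleanup bool) {
	// Dump all Cluster API resources in the namespace to the artifact folder
	// before deleting anything, so the logs survive the teardown.
	framework.DumpAllResources(ctx, framework.DumpAllResourcesInput{
		Lister:    proxy.GetClient(),
		Namespace: namespace,
		LogPath:   filepath.Join(artifactFolder, "clusters", proxy.GetName(), "resources"),
	})

	if !skipCleanup {
		// Delete every Cluster in the namespace and wait until they are gone.
		framework.DeleteAllClustersAndWait(ctx, framework.DeleteAllClustersAndWaitInput{
			Client:    proxy.GetClient(),
			Namespace: namespace,
		}, intervalsGetter(specName, "wait-delete-cluster")...)
	}
}
```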

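For context on the removed block: after deleting the Cluster it waited for all BareMetalHosts to return to the `available` state via `WaitForNumBmhInState`, which is defined elsewhere in the suite. A hypothetical sketch of how such a polling helper can be written with Gomega and controller-runtime (the name, signature, and body are assumptions, not the project's actual code):

```go
package e2e

import (
	"context"

	bmov1alpha1 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
	. "github.com/onsi/gomega"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// waitForNumBmhInState polls until exactly `want` BareMetalHosts in the given
// namespace report the given provisioning state. Hypothetical sketch; the
// suite's actual WaitForNumBmhInState may differ.
func waitForNumBmhInState(ctx context.Context, c client.Client, ns string, state bmov1alpha1.ProvisioningState, want int, intervals ...interface{}) {
	Eventually(func(g Gomega) {
		bmhList := bmov1alpha1.BareMetalHostList{}
		g.Expect(c.List(ctx, &bmhList, client.InNamespace(ns))).To(Succeed())
		count := 0
		for _, bmh := range bmhList.Items {
			if bmh.Status.Provisioning.State == state {
				count++
			}
		}
		// In the removed block, `state` was bmov1alpha1.StateAvailable and
		// `want` was numberOfWorkers + numberOfControlplane.
		g.Expect(count).To(Equal(want))
	}, intervals...).Should(Succeed())
}
```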