Added a log collector implementation for Metal3
Only collects qemu serial logs for now.

Integration test modified to a more basic version.
huutomerkki committed Jul 19, 2023
1 parent d4629af commit 4993b23
Showing 3 changed files with 86 additions and 65 deletions.
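
For orientation, here is a minimal sketch of how the new collector is wired in and triggered, pieced together from the hunks below. It assumes the package-local initScheme helper that the suite already uses; the wireAndCollect function is illustrative only and not part of the commit.

package e2e

import (
	"context"

	"sigs.k8s.io/cluster-api/test/framework"
)

// wireAndCollect shows the two touch points of this commit: the bootstrap
// cluster proxy is created with the Metal3 log collector attached, and the
// cleanup helper later asks the proxy to collect workload cluster logs,
// which is expected to fan out to Metal3LogCollector.CollectMachineLog for
// each Machine of the workload cluster.
func wireAndCollect(ctx context.Context, kubeconfigPath, namespace, clusterName, artifactFolder string) {
	proxy := framework.NewClusterProxy("bootstrap", kubeconfigPath, initScheme(),
		framework.WithMachineLogCollector(Metal3LogCollector{}))

	// In the commit itself this call lives in DumpSpecResourcesAndCleanup (test/e2e/common.go).
	proxy.CollectWorkloadClusterLogs(ctx, namespace, clusterName, artifactFolder)
}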
65 changes: 65 additions & 0 deletions test/e2e/common.go
@@ -8,6 +8,7 @@ import (
"net/http"
"os"
"os/exec"
"path"
"path/filepath"
"strconv"
"strings"
@@ -25,6 +26,7 @@ import (
"k8s.io/utils/pointer"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
"sigs.k8s.io/cluster-api/test/framework"
"sigs.k8s.io/cluster-api/util/patch"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -105,6 +107,8 @@ func DumpSpecResourcesAndCleanup(ctx context.Context, specName string, clusterPr
	Expect(os.RemoveAll(clusterctlLogFolder)).Should(Succeed())
	client := clusterProxy.GetClient()

	clusterProxy.CollectWorkloadClusterLogs(ctx, namespace, clusterName, artifactFolder)

	// Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself.
	By(fmt.Sprintf("Dumping all the Cluster API resources in the %q namespace", namespace))
	// Dump all Cluster API related resources to artifacts before deleting them.
@@ -554,3 +558,64 @@ func Metal3MachineToBmhName(m3machine infrav1.Metal3Machine) string {
func BmhToVMName(host bmov1alpha1.BareMetalHost) string {
	return strings.ReplaceAll(host.Name, "-", "_")
}

// BmhNameToVMName converts a BareMetalHost name into the name of the libvirt VM backing it.
func BmhNameToVMName(hostname string) string {
	return strings.ReplaceAll(hostname, "-", "_")
}

// MachineToVMName returns the name of the libvirt VM backing the given Machine,
// found by listing the Metal3Machines in the Machine's namespace and matching on
// the Machine name.
func MachineToVMName(ctx context.Context, cli client.Client, m *clusterv1.Machine) (string, error) {
	allMetal3Machines := &infrav1.Metal3MachineList{}
	Expect(cli.List(ctx, allMetal3Machines, client.InNamespace(m.Namespace))).To(Succeed())
	for _, machine := range allMetal3Machines.Items {
		name, err := Metal3MachineToMachineName(machine)
		if err != nil {
			Logf("error getting Machine name from Metal3machine: %s", err)
		} else if name == m.Name {
			return BmhNameToVMName(Metal3MachineToBmhName(machine)), nil
		}
	}
	return "", fmt.Errorf("no matching Metal3Machine found for current Machine")
}

// Metal3LogCollector implements the cluster-api test framework's log collector
// interface for Metal3. For now it only collects the qemu serial logs.
type Metal3LogCollector struct{}

// CollectMachineLog copies the qemu serial log of the VM backing the given
// Machine into outputPath.
func (Metal3LogCollector) CollectMachineLog(ctx context.Context, cli client.Client, m *clusterv1.Machine, outputPath string) error {
	VMName, err := MachineToVMName(ctx, cli, m)
	if err != nil {
		return fmt.Errorf("error while fetching the VM name: %w", err)
	}

	qemuFolder := path.Join(outputPath, VMName)
	if err := os.MkdirAll(qemuFolder, 0o750); err != nil {
		fmt.Fprintf(GinkgoWriter, "couldn't create directory %q : %s\n", qemuFolder, err)
	}

	serialLog := fmt.Sprintf("/var/log/libvirt/qemu/%s-serial0.log", VMName)
	if _, err := os.Stat(serialLog); os.IsNotExist(err) {
		return fmt.Errorf("error finding the serial log: %w", err)
	}

	copyCmd := fmt.Sprintf("sudo cp %s %s", serialLog, qemuFolder)
	cmd := exec.Command("/bin/sh", "-c", copyCmd) // #nosec G204:gosec
	if output, err := cmd.Output(); err != nil {
		return fmt.Errorf("something went wrong when executing '%s': %w, output: %s", cmd.String(), err, output)
	}
	setPermsCmd := fmt.Sprintf("sudo chmod -v 777 %s", path.Join(qemuFolder, filepath.Base(serialLog)))
	cmd = exec.Command("/bin/sh", "-c", setPermsCmd) // #nosec G204:gosec
	output, err := cmd.Output()
	Logf("chmod output: %s", output)
	if err != nil {
		return fmt.Errorf("error changing file permissions after copying: %w, output: %s", err, output)
	}

	Logf("Successfully collected logs for machine %s", m.Name)
	return nil
}

// CollectMachinePoolLog is not implemented yet.
func (Metal3LogCollector) CollectMachinePoolLog(_ context.Context, _ client.Client, _ *expv1.MachinePool, _ string) error {
	return fmt.Errorf("CollectMachinePoolLog not implemented")
}

// CollectInfrastructureLogs is not implemented yet.
func (Metal3LogCollector) CollectInfrastructureLogs(_ context.Context, _ client.Client, _ *clusterv1.Cluster, _ string) error {
	return fmt.Errorf("CollectInfrastructureLogs not implemented")
}
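
A note on the conventions the collector relies on: the libvirt VM is assumed to be named after the BareMetalHost with dashes replaced by underscores, and its serial console is assumed to be logged under /var/log/libvirt/qemu/. A small package-local sketch (the hostname is purely hypothetical):

package e2e

import "fmt"

// serialLogPathExample illustrates the chain used by CollectMachineLog:
// Machine -> Metal3Machine -> BareMetalHost -> libvirt VM name, and the
// serial log path that must exist on the host running libvirt.
func serialLogPathExample() {
	vmName := BmhNameToVMName("test1-controlplane-0") // hypothetical BMH name -> "test1_controlplane_0"
	fmt.Printf("/var/log/libvirt/qemu/%s-serial0.log\n", vmName)
}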
3 changes: 2 additions & 1 deletion test/e2e/e2e_suite_test.go
@@ -146,7 +146,8 @@ var _ = SynchronizedBeforeSuite(func() []byte {
	kubeconfigPath := parts[3]

	e2eConfig = loadE2EConfig(configPath)
	bootstrapClusterProxy = framework.NewClusterProxy("bootstrap", kubeconfigPath, initScheme())
	withMetal3LogCollectorOpt := framework.WithMachineLogCollector(Metal3LogCollector{})
	bootstrapClusterProxy = framework.NewClusterProxy("bootstrap", kubeconfigPath, initScheme(), withMetal3LogCollectorOpt)
})

// Using a SynchronizedAfterSuite for controlling how to delete resources shared across ParallelNodes (~ginkgo threads).
83 changes: 19 additions & 64 deletions test/e2e/integration_test.go
@@ -1,79 +1,34 @@
package e2e

import (
"os"
"path/filepath"
"strings"

bmov1alpha1 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
. "github.com/onsi/ginkgo/v2"
"sigs.k8s.io/cluster-api/test/framework"
"sigs.k8s.io/controller-runtime/pkg/client"
. "github.com/onsi/gomega"
)

var _ = Describe("When testing integration [integration]", func() {
It("CI Test Provision", func() {
BeforeEach(func() {
osType := strings.ToLower(os.Getenv("OS"))
Expect(osType).ToNot(Equal(""))
validateGlobals(specName)

// We need to override clusterctl apply log folder to avoid getting our credentials exposed.
clusterctlLogFolder = filepath.Join(os.TempDir(), "clusters", bootstrapClusterProxy.GetName())
})

It("Should create a workload cluster", func() {
By("Fetching cluster configuration")
numberOfWorkers = int(*e2eConfig.GetInt32PtrVariable("WORKER_MACHINE_COUNT"))
numberOfControlplane = int(*e2eConfig.GetInt32PtrVariable("CONTROL_PLANE_MACHINE_COUNT"))
k8sVersion := e2eConfig.GetVariable("KUBERNETES_VERSION")
By("Provision Workload cluster")
targetCluster, result := createTargetCluster(k8sVersion)
By("Pivot objects to target cluster")
pivoting(ctx, func() PivotingInput {
return PivotingInput{
E2EConfig: e2eConfig,
BootstrapClusterProxy: bootstrapClusterProxy,
TargetCluster: targetCluster,
SpecName: specName,
ClusterName: clusterName,
Namespace: namespace,
ArtifactFolder: artifactFolder,
ClusterctlConfigPath: clusterctlConfigPath,
}
})
// Fetch the target cluster resources before re-pivoting.
By("Fetch the target cluster resources before re-pivoting")
framework.DumpAllResources(ctx, framework.DumpAllResourcesInput{
Lister: targetCluster.GetClient(),
Namespace: namespace,
LogPath: filepath.Join(artifactFolder, "clusters", clusterName, "resources"),
})
By("Repivot objects to the source cluster")
rePivoting(ctx, func() RePivotingInput {
return RePivotingInput{
E2EConfig: e2eConfig,
BootstrapClusterProxy: bootstrapClusterProxy,
TargetCluster: targetCluster,
SpecName: specName,
ClusterName: clusterName,
Namespace: namespace,
ArtifactFolder: artifactFolder,
ClusterctlConfigPath: clusterctlConfigPath,
}
})
By("Deprovision target cluster")
bootstrapClient := bootstrapClusterProxy.GetClient()
intervals := e2eConfig.GetIntervals(specName, "wait-deprovision-cluster")
// In pivoting step we labeled the BMO CRDs (so that the objects are moved
// by CAPI pivoting feature), which made CAPI DeleteClusterAndWait()
// fail as it has a check to make sure all resources managed by CAPI
// is gone after Cluster deletion. Therefore, we opted not to use
// DeleteClusterAndWait(), but only delete the cluster and then wait
// for it to be deleted.
framework.DeleteCluster(ctx, framework.DeleteClusterInput{
Deleter: bootstrapClient,
Cluster: result.Cluster,
})
Logf("Waiting for the Cluster object to be deleted")
framework.WaitForClusterDeleted(ctx, framework.WaitForClusterDeletedInput{
Getter: bootstrapClient,
Cluster: result.Cluster,
}, intervals...)
numberOfAvailableBMHs := numberOfWorkers + numberOfControlplane
intervals = e2eConfig.GetIntervals(specName, "wait-bmh-deprovisioning-available")
WaitForNumBmhInState(ctx, bmov1alpha1.StateAvailable, WaitForNumInput{
Client: bootstrapClient,
Options: []client.ListOption{client.InNamespace(namespace)},
Replicas: numberOfAvailableBMHs,
Intervals: intervals,
})
targetCluster, _ = createTargetCluster(k8sVersion)
})

AfterEach(func() {
DumpSpecResourcesAndCleanup(ctx, specName, bootstrapClusterProxy, artifactFolder, namespace, e2eConfig.GetIntervals, clusterName, clusterctlLogFolder, skipCleanup)
})
})
