diff --git a/e2e/dractions/actions.go b/e2e/dractions/actions.go index b4ee87408..91c5db7c8 100644 --- a/e2e/dractions/actions.go +++ b/e2e/dractions/actions.go @@ -5,7 +5,6 @@ package dractions import ( "strings" - "time" ramen "github.com/ramendr/ramen/api/v1alpha1" "github.com/ramendr/ramen/e2e/deployers" @@ -17,8 +16,6 @@ import ( const ( OcmSchedulingDisable = "cluster.open-cluster-management.io/experimental-scheduling-disable" - - FiveSecondsDuration = 5 * time.Second ) // If AppSet/Subscription, find Placement @@ -126,7 +123,7 @@ func Failover(ctx types.Context) error { log.Info("Failing over workload") - return failoverRelocate(ctx, ramen.ActionFailover) + return failoverRelocate(ctx, ramen.ActionFailover, ramen.FailedOver) } // Determine DRPC @@ -143,10 +140,11 @@ func Relocate(ctx types.Context) error { log.Info("Relocating workload") - return failoverRelocate(ctx, ramen.ActionRelocate) + return failoverRelocate(ctx, ramen.ActionRelocate, ramen.Relocated) } -func failoverRelocate(ctx types.Context, action ramen.DRAction) error { +func failoverRelocate(ctx types.Context, action ramen.DRAction, state ramen.DRState) error { + drpcName := ctx.Name() name := ctx.Name() namespace := ctx.Namespace() drpcName := name @@ -156,11 +154,11 @@ func failoverRelocate(ctx types.Context, action ramen.DRAction) error { return err } - if action == ramen.ActionFailover { - return waitDRPC(ctx, client, namespace, name, ramen.FailedOver) + if err := waitDRPCPhase(ctx, client, namespace, drpcName, state); err != nil { + return err } - return waitDRPC(ctx, client, namespace, name, ramen.Relocated) + return waitDRPCReady(ctx, client, namespace, drpcName) } func waitAndUpdateDRPC( diff --git a/e2e/dractions/actionsdiscoveredapps.go b/e2e/dractions/actionsdiscoveredapps.go index 7f5ca21a6..8c5ef748d 100644 --- a/e2e/dractions/actionsdiscoveredapps.go +++ b/e2e/dractions/actionsdiscoveredapps.go @@ -90,18 +90,18 @@ func FailoverDiscoveredApps(ctx types.Context) error { log := 
ctx.Logger() log.Info("Failing over workload") - return failoverRelocateDiscoveredApps(ctx, ramen.ActionFailover) + return failoverRelocateDiscoveredApps(ctx, ramen.ActionFailover, ramen.FailedOver) } func RelocateDiscoveredApps(ctx types.Context) error { log := ctx.Logger() log.Info("Relocating workload") - return failoverRelocateDiscoveredApps(ctx, ramen.ActionRelocate) + return failoverRelocateDiscoveredApps(ctx, ramen.ActionRelocate, ramen.Relocated) } // nolint:funlen,cyclop -func failoverRelocateDiscoveredApps(ctx types.Context, action ramen.DRAction) error { +func failoverRelocateDiscoveredApps(ctx types.Context, action ramen.DRAction, state ramen.DRState) error { name := ctx.Name() log := ctx.Logger() namespace := ctx.Namespace() // this namespace is in hub @@ -143,10 +143,11 @@ func failoverRelocateDiscoveredApps(ctx types.Context, action ramen.DRAction) er } if err = waitDRPCProgression(ctx, client, namespace, name, ramen.ProgressionCompleted, false); err != nil { + return err } - if err = waitDRPCReady(ctx, client, namespace, name); err != nil { + if err := waitDRPCReady(ctx, client, namespace, name); err != nil { return err } diff --git a/e2e/dractions/retry.go b/e2e/dractions/retry.go index 705555665..fedb8b1fb 100644 --- a/e2e/dractions/retry.go +++ b/e2e/dractions/retry.go @@ -12,6 +12,8 @@ import ( "github.com/ramendr/ramen/e2e/types" "github.com/ramendr/ramen/e2e/util" "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "open-cluster-management.io/api/cluster/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -48,58 +50,35 @@ func waitDRPCReady(ctx types.Context, client client.Client, namespace string, dr log := ctx.Logger() startTime := time.Now() + log.Info("Waiting until drpc is ready") for { drpc, err := getDRPC(client, namespace, drpcName) if err != nil { return err } - conditionReady := checkDRPCConditions(drpc) - if conditionReady && drpc.Status.LastGroupSyncTime != 
nil { + available := conditionMet(drpc.Status.Conditions, ramen.ConditionAvailable) + peerReady := conditionMet(drpc.Status.Conditions, ramen.ConditionPeerReady) + + if available && peerReady && drpc.Status.LastGroupSyncTime != nil { log.Info("drpc is ready") return nil } if time.Since(startTime) > util.Timeout { - if !conditionReady { - log.Info("drpc condition 'Available' or 'PeerReady' is not True") - } - - if conditionReady && drpc.Status.LastGroupSyncTime == nil { - log.Info("drpc LastGroupSyncTime is nil") - } - - return fmt.Errorf("drpc %q is not ready yet before timeout, fail", drpcName) + return fmt.Errorf("timeout waiting for drpc to become ready (Available: %v, PeerReady: %v, lastGroupSyncTime: %v)", + available, peerReady, drpc.Status.LastGroupSyncTime) } time.Sleep(util.RetryInterval) } } -func checkDRPCConditions(drpc *ramen.DRPlacementControl) bool { - available := false - peerReady := false - - for _, cond := range drpc.Status.Conditions { - if cond.Type == "Available" { - if cond.Status != "True" { - return false - } - - available = true - } +func conditionMet(conditions []metav1.Condition, conditionType string) bool { + condition := meta.FindStatusCondition(conditions, conditionType) - if cond.Type == "PeerReady" { - if cond.Status != "True" { - return false - } - - peerReady = true - } - } - - return available && peerReady + return condition != nil && condition.Status == "True" } func waitDRPCPhase(ctx types.Context, client client.Client, namespace, name string, phase ramen.DRState) error { @@ -161,16 +140,6 @@ func getTargetCluster(client client.Client, namespace, placementName string, drp return targetCluster, nil } -// first wait DRPC to have the expected phase, then check DRPC conditions -func waitDRPC(ctx types.Context, client client.Client, namespace, name string, expectedPhase ramen.DRState) error { - // check Phase - if err := waitDRPCPhase(ctx, client, namespace, name, expectedPhase); err != nil { - return err - } - // then check 
Conditions - return waitDRPCReady(ctx, client, namespace, name) -} - func waitDRPCDeleted(ctx types.Context, client client.Client, namespace string, name string) error { log := ctx.Logger() startTime := time.Now() diff --git a/test/README.md b/test/README.md index 10dbe883a..412dc46da 100644 --- a/test/README.md +++ b/test/README.md @@ -136,7 +136,8 @@ environment. virtctl ``` - lima version 1.0.0 or later is required. + lima version 1.0.0 or later is required; the latest version is + recommended. 1. Install the `clusteradm` tool. See [Install clusteradm CLI tool](https://open-cluster-management.io/getting-started/installation/start-the-control-plane/#install-clusteradm-cli-tool) @@ -156,19 +157,24 @@ environment. For more info see [kubectl-gather](https://github.com/nirs/kubectl-gather) -1. Install `socket_vmnet` from source +1. Install `socket_vmnet` > [!IMPORTANT] - > Do not install socket_vmnet from brew, it is insecure. - + > You must install the socket_vmnet launchd service; we don't manage + > socket_vmnet with Lima. 
``` - git clone https://github.com/lima-vm/socket_vmnet.git - cd socket_vmnet - sudo make PREFIX=/opt/socket_vmnet install.bin - sudo make PREFIX=/opt/socket_vmnet install.launchd + VERSION="$(curl -fsSL https://api.github.com/repos/lima-vm/socket_vmnet/releases/latest | jq -r .tag_name)" + FILE="socket_vmnet-${VERSION:1}-$(uname -m).tar.gz" + SERVICE_ID="io.github.lima-vm.socket_vmnet" + curl -OSL "https://github.com/lima-vm/socket_vmnet/releases/download/${VERSION}/${FILE}" + sudo tar Cxzvf / "${FILE}" opt/socket_vmnet + sudo cp "/opt/socket_vmnet/share/doc/socket_vmnet/launchd/$SERVICE_ID.plist" "/Library/LaunchDaemons/$SERVICE_ID.plist" + sudo launchctl bootstrap system "/Library/LaunchDaemons/$SERVICE_ID.plist" + sudo launchctl enable system/$SERVICE_ID + sudo launchctl kickstart -kp system/$SERVICE_ID ``` - For more info see [Installing socket_vmnet from source](https://github.com/lima-vm/socket_vmnet?tab=readme-ov-file#from-source) + For more info see [Installing socket_vmnet from binary](https://github.com/lima-vm/socket_vmnet?tab=readme-ov-file#from-binary) ## Testing that drenv is healthy