Skip to content

Commit

Permalink
Merge branch 'main' into enabled-cephfs-discovered
Browse files Browse the repository at this point in the history
  • Loading branch information
Sheetalpamecha authored Dec 8, 2024
2 parents 88fbb83 + 8f74ebc commit 48ecb2a
Show file tree
Hide file tree
Showing 4 changed files with 39 additions and 65 deletions.
16 changes: 7 additions & 9 deletions e2e/dractions/actions.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,6 @@ package dractions

import (
"strings"
"time"

ramen "github.com/ramendr/ramen/api/v1alpha1"
"github.com/ramendr/ramen/e2e/deployers"
Expand All @@ -17,8 +16,6 @@ import (

const (
OcmSchedulingDisable = "cluster.open-cluster-management.io/experimental-scheduling-disable"

FiveSecondsDuration = 5 * time.Second
)

// If AppSet/Subscription, find Placement
Expand Down Expand Up @@ -126,7 +123,7 @@ func Failover(ctx types.Context) error {

log.Info("Failing over workload")

return failoverRelocate(ctx, ramen.ActionFailover)
return failoverRelocate(ctx, ramen.ActionFailover, ramen.FailedOver)
}

// Determine DRPC
Expand All @@ -143,10 +140,11 @@ func Relocate(ctx types.Context) error {

log.Info("Relocating workload")

return failoverRelocate(ctx, ramen.ActionRelocate)
return failoverRelocate(ctx, ramen.ActionRelocate, ramen.Relocated)
}

func failoverRelocate(ctx types.Context, action ramen.DRAction) error {
func failoverRelocate(ctx types.Context, action ramen.DRAction, state ramen.DRState) error {
drpcName := ctx.Name()
name := ctx.Name()
namespace := ctx.Namespace()
drpcName := name
Expand All @@ -156,11 +154,11 @@ func failoverRelocate(ctx types.Context, action ramen.DRAction) error {
return err
}

if action == ramen.ActionFailover {
return waitDRPC(ctx, client, namespace, name, ramen.FailedOver)
if err := waitDRPCPhase(ctx, client, namespace, drpcName, state); err != nil {
return err
}

return waitDRPC(ctx, client, namespace, name, ramen.Relocated)
return waitDRPCReady(ctx, client, namespace, drpcName)
}

func waitAndUpdateDRPC(
Expand Down
9 changes: 5 additions & 4 deletions e2e/dractions/actionsdiscoveredapps.go
Original file line number Diff line number Diff line change
Expand Up @@ -90,18 +90,18 @@ func FailoverDiscoveredApps(ctx types.Context) error {
log := ctx.Logger()
log.Info("Failing over workload")

return failoverRelocateDiscoveredApps(ctx, ramen.ActionFailover)
return failoverRelocateDiscoveredApps(ctx, ramen.ActionFailover, ramen.FailedOver)
}

func RelocateDiscoveredApps(ctx types.Context) error {
log := ctx.Logger()
log.Info("Relocating workload")

return failoverRelocateDiscoveredApps(ctx, ramen.ActionRelocate)
return failoverRelocateDiscoveredApps(ctx, ramen.ActionRelocate, ramen.Relocated)
}

// nolint:funlen,cyclop
func failoverRelocateDiscoveredApps(ctx types.Context, action ramen.DRAction) error {
func failoverRelocateDiscoveredApps(ctx types.Context, action ramen.DRAction, state ramen.DRState) error {
name := ctx.Name()
log := ctx.Logger()
namespace := ctx.Namespace() // this namespace is in hub
Expand Down Expand Up @@ -143,10 +143,11 @@ func failoverRelocateDiscoveredApps(ctx types.Context, action ramen.DRAction) er
}

if err = waitDRPCProgression(ctx, client, namespace, name, ramen.ProgressionCompleted, false); err != nil {

return err
}

if err = waitDRPCReady(ctx, client, namespace, name); err != nil {
if err := waitDRPCReady(ctx, client, namespace, name); err != nil {
return err
}

Expand Down
55 changes: 12 additions & 43 deletions e2e/dractions/retry.go
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,8 @@ import (
"github.com/ramendr/ramen/e2e/types"
"github.com/ramendr/ramen/e2e/util"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"open-cluster-management.io/api/cluster/v1beta1"
"sigs.k8s.io/controller-runtime/pkg/client"
)
Expand Down Expand Up @@ -48,58 +50,35 @@ func waitDRPCReady(ctx types.Context, client client.Client, namespace string, dr
log := ctx.Logger()
startTime := time.Now()

log.Info("Waiting until drpc is ready")
for {
drpc, err := getDRPC(client, namespace, drpcName)
if err != nil {
return err
}

conditionReady := checkDRPCConditions(drpc)
if conditionReady && drpc.Status.LastGroupSyncTime != nil {
available := conditionMet(drpc.Status.Conditions, ramen.ConditionAvailable)
peerReady := conditionMet(drpc.Status.Conditions, ramen.ConditionPeerReady)

if available && peerReady && drpc.Status.LastGroupSyncTime != nil {
log.Info("drpc is ready")

return nil
}

if time.Since(startTime) > util.Timeout {
if !conditionReady {
log.Info("drpc condition 'Available' or 'PeerReady' is not True")
}

if conditionReady && drpc.Status.LastGroupSyncTime == nil {
log.Info("drpc LastGroupSyncTime is nil")
}

return fmt.Errorf("drpc %q is not ready yet before timeout, fail", drpcName)
return fmt.Errorf("timeout waiting for drpc to become ready (Available: %v, PeerReady: %v, lastGroupSyncTime: %v)",
available, peerReady, drpc.Status.LastGroupSyncTime)
}

time.Sleep(util.RetryInterval)
}
}

func checkDRPCConditions(drpc *ramen.DRPlacementControl) bool {
available := false
peerReady := false

for _, cond := range drpc.Status.Conditions {
if cond.Type == "Available" {
if cond.Status != "True" {
return false
}

available = true
}
func conditionMet(conditions []metav1.Condition, conditionType string) bool {
condition := meta.FindStatusCondition(conditions, conditionType)

if cond.Type == "PeerReady" {
if cond.Status != "True" {
return false
}

peerReady = true
}
}

return available && peerReady
return condition != nil && condition.Status == "True"
}

func waitDRPCPhase(ctx types.Context, client client.Client, namespace, name string, phase ramen.DRState) error {
Expand Down Expand Up @@ -161,16 +140,6 @@ func getTargetCluster(client client.Client, namespace, placementName string, drp
return targetCluster, nil
}

// first wait DRPC to have the expected phase, then check DRPC conditions
func waitDRPC(ctx types.Context, client client.Client, namespace, name string, expectedPhase ramen.DRState) error {
// check Phase
if err := waitDRPCPhase(ctx, client, namespace, name, expectedPhase); err != nil {
return err
}
// then check Conditions
return waitDRPCReady(ctx, client, namespace, name)
}

func waitDRPCDeleted(ctx types.Context, client client.Client, namespace string, name string) error {
log := ctx.Logger()
startTime := time.Now()
Expand Down
24 changes: 15 additions & 9 deletions test/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -136,7 +136,8 @@ environment.
virtctl
```

lima version 1.0.0 or later is required.
lima version 1.0.0 or later is required, latest version is
recommended.

1. Install the `clusteradm` tool. See
[Install clusteradm CLI tool](https://open-cluster-management.io/getting-started/installation/start-the-control-plane/#install-clusteradm-cli-tool)
Expand All @@ -156,19 +157,24 @@ environment.

For more info see [kubectl-gather](https://github.com/nirs/kubectl-gather)

1. Install `socket_vmnet` from source
1. Install `socket_vmnet`

> [!IMPORTANT]
> Do not install socket_vmnet from brew; it is insecure.
> You must install the socket_vmnet launchd service; we don't manage
> socket_vmnet with Lima.
```
git clone https://github.com/lima-vm/socket_vmnet.git
cd socket_vmnet
sudo make PREFIX=/opt/socket_vmnet install.bin
sudo make PREFIX=/opt/socket_vmnet install.launchd
VERSION="$(curl -fsSL https://api.github.com/repos/lima-vm/socket_vmnet/releases/latest | jq -r .tag_name)"
FILE="socket_vmnet-${VERSION:1}-$(uname -m).tar.gz"
SERVICE_ID="io.github.lima-vm.socket_vmnet"
curl -OSL "https://github.com/lima-vm/socket_vmnet/releases/download/${VERSION}/${FILE}"
sudo tar Cxzvf / "${FILE}" opt/socket_vmnet
sudo cp "/opt/socket_vmnet/share/doc/socket_vmnet/launchd/$SERVICE_ID.plist" "/Library/LaunchDaemons/$SERVICE_ID.plist"
sudo launchctl bootstrap system "/Library/LaunchDaemons/$SERVICE_ID.plist"
sudo launchctl enable system/$SERVICE_ID
sudo launchctl kickstart -kp system/$SERVICE_ID
```

For more info see [Installing socket_vmnet from source](https://github.com/lima-vm/socket_vmnet?tab=readme-ov-file#from-source)
For more info see [Installing socket_vmnet from binary](https://github.com/lima-vm/socket_vmnet?tab=readme-ov-file#from-binary)

## Testing that drenv is healthy

Expand Down

0 comments on commit 48ecb2a

Please sign in to comment.