sVMotion of volumes sanity test cases
Aishwarya-Hebbar committed Jul 3, 2024
1 parent 89e4fc9 commit 2cc976f
Showing 4 changed files with 301 additions and 0 deletions.
1 change: 1 addition & 0 deletions tests/e2e/e2e_common.go
@@ -85,6 +85,7 @@ const (
envRegionZoneWithSharedDS = "TOPOLOGY_WITH_SHARED_DATASTORE"
envRemoteHCIDsUrl = "REMOTE_HCI_DS_URL"
envSharedDatastoreURL = "SHARED_VSPHERE_DATASTORE_URL"
envSharedDatastore2URL = "SHARED_VSPHERE_DATASTORE2_URL"
envSharedVVOLDatastoreURL = "SHARED_VVOL_DATASTORE_URL"
envSharedNFSDatastoreURL = "SHARED_NFS_DATASTORE_URL"
envSharedVMFSDatastoreURL = "SHARED_VMFS_DATASTORE_URL"
218 changes: 218 additions & 0 deletions tests/e2e/svmotion_volumes.go
@@ -35,6 +35,8 @@ import (
"github.com/vmware/govmomi/find"
"github.com/vmware/govmomi/object"
pbmtypes "github.com/vmware/govmomi/pbm/types"
vim25types "github.com/vmware/govmomi/vim25/types"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
@@ -44,6 +46,7 @@
fnodes "k8s.io/kubernetes/test/e2e/framework/node"
fpod "k8s.io/kubernetes/test/e2e/framework/pod"
fpv "k8s.io/kubernetes/test/e2e/framework/pv"
fss "k8s.io/kubernetes/test/e2e/framework/statefulset"
admissionapi "k8s.io/pod-security-admission/api"
)

@@ -76,6 +79,7 @@ var _ = ginkgo.Describe("[csi-block-vanilla] [csi-block-vanilla-parallelized] Re
labelValue string
pvc10g string
pandoraSyncWaitTime int
migratedVms []vim25types.ManagedObjectReference
)
ginkgo.BeforeEach(func() {
bootstrap()
@@ -1384,4 +1388,218 @@ var _ = ginkgo.Describe("[csi-block-vanilla] [csi-block-vanilla-parallelized] Re
gomega.Expect(err).NotTo(gomega.HaveOccurred())

})

/* Migrate dynamic PVCs with no common host
STEPS:
1. Create a tag-based storage policy on two shared datastores and a storage class that uses it.
2. Provision 10 PVCs of 1 GB each using the storage class created in step 1.
3. Create 3 statefulsets with 3 replicas each using the same storage class.
4. Wait for all volumes and application pods to be healthy.
5. Verify the volumes are created in CNS using the CNS query API and that their metadata is pushed to CNS.
6. Migrate the k8s node VMs from one vSphere cluster to another vSphere cluster.
7. Migrate the detached volumes to the destination datastore using the CNS relocate volume API.
8. Continue writing to the attached volumes while the migration is in progress.
9. Wait for and verify that the volume migration succeeds.
10. Apply labels to all PVCs and PVs and verify the volumes' metadata is intact and the volume health is accessible and compliant.
11. Expand all volumes to a size of 10 GB.
12. Attach pods to the remaining PVCs created in step 2 and verify data can be read from and written to the volumes.
13. Scale up all statefulset replicas to 5.
14. Delete all workloads created by the test.
15. Delete the storage class.
*/
ginkgo.It("Migrate Dynamic PVCs with no common host", func() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
pvcCount := 10
var statefulSetReplicaCount int32 = 3
stsCount := 3
var statefulsets []*appsv1.StatefulSet

sharedDatastoreURL := os.Getenv(envSharedDatastoreURL)
if sharedDatastoreURL == "" {
ginkgo.Skip(fmt.Sprintf("Env %v is missing", envSharedDatastoreURL))
}

sharedDatastore2URL := os.Getenv(envSharedDatastore2URL)
if sharedDatastore2URL == "" {
ginkgo.Skip(fmt.Sprintf("Env %v is missing", envSharedDatastore2URL))
}

govmomiClient := newClient(ctx, &e2eVSphere)
pc := newPbmClient(ctx, govmomiClient)
scParameters := make(map[string]string)
pvcs := []*v1.PersistentVolumeClaim{}

rand.New(rand.NewSource(time.Now().UnixNano()))
suffix := fmt.Sprintf("-%v-%v", time.Now().UnixNano(), rand.Intn(10000))
categoryName := "category" + suffix
tagName := "tag" + suffix

ginkgo.By("Creating tag and category to tag datastore")

catID, tagID := createCategoryNTag(ctx, categoryName, tagName)
defer func() {
deleteCategoryNTag(ctx, catID, tagID)
}()

ginkgo.By("Attaching tag to shared vmfs datastores")

attachTagToDS(ctx, tagID, sharedDatastoreURL)
defer func() {
detachTagFromDS(ctx, tagID, sharedDatastoreURL)
}()

attachTagToDS(ctx, tagID, sharedDatastore2URL)
defer func() {
detachTagFromDS(ctx, tagID, sharedDatastore2URL)
}()

ginkgo.By("Create Tag Based policy with shared datstores")
policyID, policyName := createTagBasedPolicy(
ctx, pc, map[string]string{categoryName: tagName})
defer func() {
deleteStoragePolicy(ctx, pc, policyID)
}()

ginkgo.By("Create Storageclass from the policy created")
scParameters[scParamStoragePolicyName] = policyName
sc, err := createStorageClass(client, scParameters,
nil, "", "", true, "")
gomega.Expect(err).NotTo(gomega.HaveOccurred())
defer func() {
ginkgo.By("Delete the SCs created")
err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}()

ginkgo.By("Create Storageclass from the policy created")
pvcs = createMultiplePVCsInParallel(ctx, client, namespace, sc, pvcCount, nil)

defer func() {
for i := 0; i < pvcCount; i++ {
pv := getPvFromClaim(client, pvcs[i].Namespace, pvcs[i].Name)
err = fpv.DeletePersistentVolumeClaim(ctx, client, pvcs[i].Name, namespace)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, poll, pollTimeoutShort))
err = multiVCe2eVSphere.waitForCNSVolumeToBeDeletedInMultiVC(pv.Spec.CSI.VolumeHandle)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
}()
ginkgo.By("Verify the PVCs created in step 3 are bound")
pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvcs, framework.ClaimProvisionTimeout)
gomega.Expect(err).NotTo(gomega.HaveOccurred())

for i := 0; i < stsCount; i++ {
sts := createCustomisedStatefulSets(ctx, client, namespace, false, statefulSetReplicaCount,
false, nil, false, true, "sts"+strconv.Itoa(i), "", sc, "")
statefulsets = append(statefulsets, sts)
}
gomega.Expect(err).NotTo(gomega.HaveOccurred())
defer func() {
fss.DeleteAllStatefulSets(ctx, client, namespace)
}()

// Verify the volumes attached to the statefulset pods are on the source datastore
// and that their metadata is present in CNS before migration

for _, sts := range statefulsets {

ssPodsBeforeScaleDown := fss.GetPodList(ctx, client, sts)
for _, sspod := range ssPodsBeforeScaleDown.Items {
_, err := client.CoreV1().Pods(namespace).Get(ctx, sspod.Name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
for _, volumespec := range sspod.Spec.Volumes {
if volumespec.PersistentVolumeClaim != nil {
pv := getPvFromClaim(client, sts.Namespace, volumespec.PersistentVolumeClaim.ClaimName)
ginkgo.By("Verify if VolumeID is created on the given datastores")
volumeID := pv.Spec.CSI.VolumeHandle
dsUrlWhereVolumeIsPresent := fetchDsUrl4CnsVol(e2eVSphere, volumeID)
framework.Logf("Volume: %s is present on %s", volumeID, dsUrlWhereVolumeIsPresent)
e2eVSphere.verifyDatastoreMatch(volumeID, []string{sharedDatastoreURL})
// Verify the attached volume match the one in CNS cache
if !multivc {
err := verifyVolumeMetadataInCNS(&e2eVSphere, pv.Spec.CSI.VolumeHandle,
volumespec.PersistentVolumeClaim.ClaimName, pv.ObjectMeta.Name, sspod.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
} else {
err := verifyVolumeMetadataInCNSForMultiVC(&multiVCe2eVSphere, pv.Spec.CSI.VolumeHandle,
volumespec.PersistentVolumeClaim.ClaimName, pv.ObjectMeta.Name, sspod.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
}
}
}
}

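// Before migrating, check each standalone PVC volume: it should be SPBM compliant,
// carry the tag-based policy created above, and reside on the first shared datastore.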
for i := 0; i < pvcCount; i++ {
volumeID := pvs[i].Spec.CSI.VolumeHandle

ginkgo.By("Verify that the created CNS volumes are compliant and have correct policy id")
storagePolicyMatches, err := e2eVSphere.VerifySpbmPolicyOfVolume(volumeID, policyName)
e2eVSphere.verifyVolumeCompliance(volumeID, true)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(storagePolicyMatches).To(gomega.BeTrue(), "storage policy verification failed")

ginkgo.By("Verify if VolumeID is created on the given datastores")
dsUrlWhereVolumeIsPresent := fetchDsUrl4CnsVol(e2eVSphere, volumeID)
framework.Logf("Volume: %s is present on %s", volumeID, dsUrlWhereVolumeIsPresent)
e2eVSphere.verifyDatastoreMatch(volumeID, []string{sharedDatastoreURL})

}

ginkgo.By("storage vmotion K8s node VMs to another datastore of a different cluster")
k8sNodeIpList := getK8sNodeIPs(ctx, client)
k8sNodeVmRef := getK8sVmMos(ctx, client, k8sNodeIpList)

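// Storage vMotion each k8s node VM to the second shared datastore; the attached CNS
// volumes are expected to move along with their node VMs.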
for _, vm := range k8sNodeVmRef {
e2eVSphere.svmotionVM2DiffDsOfDiffCluster(ctx, object.NewVirtualMachine(e2eVSphere.Client.Client, vm.Reference()),
sharedDatastore2URL, "cluster2")
migratedVms = append(migratedVms, vm)
}

for _, sts := range statefulsets {

ssPodsBeforeScaleDown := fss.GetPodList(ctx, client, sts)
for _, sspod := range ssPodsBeforeScaleDown.Items {
_, err := client.CoreV1().Pods(namespace).Get(ctx, sspod.Name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
for _, volumespec := range sspod.Spec.Volumes {
if volumespec.PersistentVolumeClaim != nil {
pv := getPvFromClaim(client, sts.Namespace, volumespec.PersistentVolumeClaim.ClaimName)
ginkgo.By("Verify if VolumeID is created on the given datastores")
volumeID := pv.Spec.CSI.VolumeHandle
dsUrlWhereVolumeIsPresent := fetchDsUrl4CnsVol(e2eVSphere, volumeID)
framework.Logf("Volume: %s is present on %s", volumeID, dsUrlWhereVolumeIsPresent)
e2eVSphere.verifyDatastoreMatch(volumeID, []string{sharedDatastore2URL})
// Verify the attached volume match the one in CNS cache
if !multivc {
err := verifyVolumeMetadataInCNS(&e2eVSphere, pv.Spec.CSI.VolumeHandle,
volumespec.PersistentVolumeClaim.ClaimName, pv.ObjectMeta.Name, sspod.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
} else {
err := verifyVolumeMetadataInCNSForMultiVC(&multiVCe2eVSphere, pv.Spec.CSI.VolumeHandle,
volumespec.PersistentVolumeClaim.ClaimName, pv.ObjectMeta.Name, sspod.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
}
}
}
}

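// The standalone PVCs are not attached to any pod, so they are moved explicitly with
// the CNS RelocateVolume API rather than by the node VM storage vMotion above.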
for i := 0; i < pvcCount; i++ {
volumeID := pvs[i].Spec.CSI.VolumeHandle
ginkgo.By("Relocate volume from one shared datastore to another datastore using" +
"CnsRelocateVolume API")
dsRefDest := getDsMoRefFromURL(ctx, sharedDatastore2URL)
_, err = e2eVSphere.cnsRelocateVolume(e2eVSphere, ctx, volumeID, dsRefDest)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
e2eVSphere.verifyDatastoreMatch(volumeID, []string{sharedDatastore2URL})

ginkgo.By("Verify that the relocated CNS volumes are compliant and have correct policy id")
storagePolicyMatches, err := e2eVSphere.VerifySpbmPolicyOfVolume(volumeID, policyName)
e2eVSphere.verifyVolumeCompliance(volumeID, true)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(storagePolicyMatches).To(gomega.BeTrue(), "storage policy verification failed")
}

})
})
33 changes: 33 additions & 0 deletions tests/e2e/util.go
@@ -6974,3 +6974,36 @@ func getHostMoref4K8sNode(
vmIp2MoRefMap := vmIpToMoRefMap(ctx)
return vmIp2MoRefMap[getK8sNodeIP(node)]
}

// getK8sNodeIPs returns the external IPs of all k8s nodes in a vanilla setup.
func getK8sNodeIPs(ctx context.Context, client clientset.Interface) []string {
var err error
nodes, err := client.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
var k8sNodeIPs []string
for _, node := range nodes.Items {

addrs := node.Status.Addresses
for _, addr := range addrs {
if addr.Type == v1.NodeExternalIP && (net.ParseIP(addr.Address)).To4() != nil {
k8sNodeIPs = append(k8sNodeIPs, addr.Address)
}
}

}
gomega.Expect(k8sNodeIPs).NotTo(gomega.BeEmpty(), "Unable to find k8s node IP")
return k8sNodeIPs
}

// getK8sVmMos returns the VM managed object references corresponding to the given k8s node IPs.
func getK8sVmMos(ctx context.Context, client clientset.Interface, k8sNodeIpList []string) []vim25types.ManagedObjectReference {
vmIp2MoRefMap := vmIpToMoRefMap(ctx)

k8sNodeVmRef := []vim25types.ManagedObjectReference{}

for _, ip := range k8sNodeIpList {
k8sNodeVmRef = append(k8sNodeVmRef, vmIp2MoRefMap[ip])
}

return k8sNodeVmRef
}
49 changes: 49 additions & 0 deletions tests/e2e/vsphere.go
@@ -1345,6 +1345,55 @@ func (vs *vSphere) getDsByUrl(ctx context.Context, datastoreURL string) mo.Datas
return dsMo
}

// getResourcePoolList retrieves the resource pools in the configured datacenter and
// returns the last one found; the clusterName argument is currently unused.
func (vs *vSphere) getResourcePoolList(ctx context.Context, clusterName string) mo.ResourcePool {
finder := find.NewFinder(vs.Client.Client, false)
dcString := e2eVSphere.Config.Global.Datacenters
dc, err := finder.Datacenter(ctx, dcString)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
finder.SetDatacenter(dc)
resourcePools, err := finder.ResourcePoolList(ctx, "*")
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("resourcePoolList: %v", resourcePools)
var resourcePoolList []vim25types.ManagedObjectReference
for _, rsPool := range resourcePools {
resourcePoolList = append(resourcePoolList, rsPool.Reference())
}
var rsPoolMoList []mo.ResourcePool
pc := property.DefaultCollector(vs.Client.Client)
properties := []string{"info"}
err = pc.Retrieve(ctx, resourcePoolList, properties, &rsPoolMoList)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
var targetRsPoolMo mo.ResourcePool
for _, rsPoolMo := range rsPoolMoList {
targetRsPoolMo = rsPoolMo
}
gomega.Expect(targetRsPoolMo).NotTo(gomega.BeNil())
return targetRsPoolMo

}

// svmotionVM2DiffDsOfDiffCluster relocates the given VM to the datastore matching
// destinationDsUrl on a different cluster using storage vMotion and waits for the task to complete.
func (vs *vSphere) svmotionVM2DiffDsOfDiffCluster(ctx context.Context, vm *object.VirtualMachine, destinationDsUrl string, clusterName string) {
dsMo := vs.getDsByUrl(ctx, destinationDsUrl)
relocateSpec := vim25types.VirtualMachineRelocateSpec{}
dsref := dsMo.Reference()
rsPoolMo := vs.getResourcePoolList(ctx, "*")
relocateSpec.Datastore = &dsref
framework.Logf("rsPoolMo: %v", rsPoolMo)

//relocateSpec.Pool = rsPoolMo.Reference()
vmname, err := vm.ObjectName(ctx)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("Starting relocation of vm %s to datastore %s of different cluster: %s", vmname, dsref.Value, clusterName)
task, err := vm.Relocate(ctx, relocateSpec, vim25types.VirtualMachineMovePriorityHighPriority)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
_, err = task.WaitForResultEx(ctx)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("Relocation of vm %s to datastore %s completed successfully: %s", vmname, dsref.Value, clusterName)
}
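
// Illustrative usage (mirrors how the test in svmotion_volumes.go drives this helper;
// destDsURL below is a placeholder for the destination datastore URL):
//
//	nodeIPs := getK8sNodeIPs(ctx, client)
//	nodeVmRefs := getK8sVmMos(ctx, client, nodeIPs)
//	for _, vmRef := range nodeVmRefs {
//		e2eVSphere.svmotionVM2DiffDsOfDiffCluster(ctx,
//			object.NewVirtualMachine(e2eVSphere.Client.Client, vmRef.Reference()),
//			destDsURL, "cluster2")
//	}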

func (vs *vSphere) getAllVms(ctx context.Context) []*object.VirtualMachine {
finder := find.NewFinder(vs.Client.Client, false)
dcString := e2eVSphere.Config.Global.Datacenters
