From ea798db2f8b7f4f619ce0d7b5e63a78b19e93165 Mon Sep 17 00:00:00 2001
From: Aishwarya-Hebbar
Date: Mon, 1 Jul 2024 15:08:59 +0530
Subject: [PATCH] svmotion of volumes sanity testcases

---
 tests/e2e/e2e_common.go           |   1 +
 tests/e2e/storage_policy_utils.go |  49 +++++++
 tests/e2e/svmotion_volumes.go     | 218 ++++++++++++++++++++++++++++++
 tests/e2e/util.go                 |  32 +++++
 tests/e2e/vsphere.go              |  60 ++++++++
 5 files changed, 360 insertions(+)

diff --git a/tests/e2e/e2e_common.go b/tests/e2e/e2e_common.go
index c249d050f3..f5e41afe09 100644
--- a/tests/e2e/e2e_common.go
+++ b/tests/e2e/e2e_common.go
@@ -85,6 +85,7 @@ const (
 	envRegionZoneWithSharedDS    = "TOPOLOGY_WITH_SHARED_DATASTORE"
 	envRemoteHCIDsUrl            = "REMOTE_HCI_DS_URL"
 	envSharedDatastoreURL        = "SHARED_VSPHERE_DATASTORE_URL"
+	envSharedDatastore2URL       = "SHARED_VSPHERE_DATASTORE2_URL"
 	envSharedVVOLDatastoreURL    = "SHARED_VVOL_DATASTORE_URL"
 	envSharedNFSDatastoreURL     = "SHARED_NFS_DATASTORE_URL"
 	envSharedVMFSDatastoreURL    = "SHARED_VMFS_DATASTORE_URL"
diff --git a/tests/e2e/storage_policy_utils.go b/tests/e2e/storage_policy_utils.go
index 76cb675022..e1adfc97f8 100644
--- a/tests/e2e/storage_policy_utils.go
+++ b/tests/e2e/storage_policy_utils.go
@@ -421,3 +421,52 @@ func createStoragePolicyWithSharedVmfsNVsand(ctx context.Context, pbmClient *pbm
 
 	return profileID, profileName
 }
+
+/* createVsanStoragePolicy creates a vsan policy with the given category/tag map
+func createVsanStoragePolicy(ctx context.Context, pbmClient *pbm.Client, categoryTagMap map[string]string) (*pbmtypes.PbmProfileId, string) {
+	s1 := rand.NewSource(time.Now().UnixNano())
+	r1 := rand.New(s1)
+	profileName := fmt.Sprintf("vsan-policy-%v-%v", time.Now().UnixNano(), strconv.Itoa(r1.Intn(1000)))
+	pbmCreateSpec := pbm.CapabilityProfileCreateSpec{
+		Name:        profileName,
+		Description: "VSAN test policy",
+		Category:    "REQUIREMENT",
+		CapabilityList: []pbm.Capability{
+			{
+				ID:        "hostFailuresToTolerate",
+				Namespace: "VSAN",
+				PropertyList: []pbm.Property{
+					{
+						ID:       "hostFailuresToTolerate",
+						Value:    "true",
+						Operator: "unset",
+						DataType: "anyType",
+					},
+				},
+			},
+		},
+	}
+	for k, v := range categoryTagMap {
+
+		pbmCreateSpec.CapabilityList = append(pbmCreateSpec.CapabilityList, pbm.Capability{
+			ID:        k,
+			Namespace: "http://www.vmware.com/storage/tag",
+			PropertyList: []pbm.Property{
+				{
+					ID:       "com.vmware.storage.tag." + k + ".property",
+					Value:    v,
+					DataType: "set",
+				},
+			},
+		})
+	}
+	createSpecVSAN, err := pbm.CreateCapabilityProfileSpec(pbmCreateSpec)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+	profileID, err := pbmClient.CreateProfile(ctx, *createSpecVSAN)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+	framework.Logf("VSAN profile with id: %v and name: '%v' created", profileID.UniqueId, profileName)
+
+	return profileID, profileName
+}*/
diff --git a/tests/e2e/svmotion_volumes.go b/tests/e2e/svmotion_volumes.go
index baead396b0..f9a2ba670b 100644
--- a/tests/e2e/svmotion_volumes.go
+++ b/tests/e2e/svmotion_volumes.go
@@ -35,6 +35,8 @@ import (
 	"github.com/vmware/govmomi/find"
 	"github.com/vmware/govmomi/object"
 	pbmtypes "github.com/vmware/govmomi/pbm/types"
+	vim25types "github.com/vmware/govmomi/vim25/types"
+	appsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
@@ -44,6 +46,7 @@ import (
 	fnodes "k8s.io/kubernetes/test/e2e/framework/node"
 	fpod "k8s.io/kubernetes/test/e2e/framework/pod"
 	fpv "k8s.io/kubernetes/test/e2e/framework/pv"
+	fss "k8s.io/kubernetes/test/e2e/framework/statefulset"
 	admissionapi "k8s.io/pod-security-admission/api"
 )
@@ -76,6 +79,7 @@ var _ = ginkgo.Describe("[csi-block-vanilla] [csi-block-vanilla-parallelized] Re
 		labelValue             string
 		pvc10g                 string
 		pandoraSyncWaitTime    int
+		migratedVms            []vim25types.ManagedObjectReference
 	)
 	ginkgo.BeforeEach(func() {
 		bootstrap()
@@ -1384,4 +1388,218 @@ var _ = ginkgo.Describe("[csi-block-vanilla] [csi-block-vanilla-parallelized] Re
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	})
+
+	/* Migrate dynamic PVCs with no common host
+	STEPS:
+	1. Create a tag-based storage policy using a tag attached to a shared datastore, and create an SC with it.
+	2. Provision 10 PVCs of 1 GB each using the storage class.
+	3. Create 3 statefulsets with 3 replicas each using the above storageclass.
+	4. Wait for all volumes and application pods to be healthy.
+	5. Verify volumes are created on CNS by using the CNSQuery API and also check that metadata is pushed to CNS.
+	6. Migrate the k8s cluster from one vsphere cluster to another vsphere cluster.
+	7. Migrate detached volumes to the destination datastore using the CNS relocate volume API.
+	8. Continue writing into the attached volumes while the migration is in progress.
+	9. Wait and verify the volume migration is a success.
+	10. Apply labels to all PVCs and PVs and verify the volumes' metadata is intact and the volumes' health reflects accessible and compliant.
+	11. Expand all volumes to a size of 10GB.
+	12. Attach pods to the remaining PVCs created at step 2 and verify we are able to read and write data to the volumes.
+	13. Scale up all statefulset replicas to 5.
+	14. Delete all the workloads created in the test.
+	15. Delete the storage class.
+	*/
+	ginkgo.It("Migrate Dynamic PVCs with no common host", func() {
+		ctx, cancel := context.WithCancel(context.Background())
+		defer cancel()
+		pvcCount := 10
+		var statefulSetReplicaCount int32 = 3
+		stsCount := 3
+		var statefulsets []*appsv1.StatefulSet
+
+		sharedDatastoreURL := os.Getenv(envSharedDatastoreURL)
+		if sharedDatastoreURL == "" {
+			ginkgo.Skip(fmt.Sprintf("Env %v is missing", envSharedDatastoreURL))
+		}
+
+		sharedDatastore2URL := os.Getenv(envSharedDatastore2URL)
+		if sharedDatastore2URL == "" {
+			ginkgo.Skip(fmt.Sprintf("Env %v is missing", envSharedDatastore2URL))
+		}
+
+		govmomiClient := newClient(ctx, &e2eVSphere)
+		pc := newPbmClient(ctx, govmomiClient)
+		scParameters := make(map[string]string)
+		pvcs := []*v1.PersistentVolumeClaim{}
+
+		rand.New(rand.NewSource(time.Now().UnixNano()))
+		suffix := fmt.Sprintf("-%v-%v", time.Now().UnixNano(), rand.Intn(10000))
+		categoryName := "category" + suffix
+		tagName := "tag" + suffix
+
+		ginkgo.By("Creating tag and category to tag datastore")
+
+		catID, tagID := createCategoryNTag(ctx, categoryName, tagName)
+		defer func() {
+			deleteCategoryNTag(ctx, catID, tagID)
+		}()
+
+		ginkgo.By("Attaching tag to the shared datastore")
+
+		attachTagToDS(ctx, tagID, sharedDatastoreURL)
+		defer func() {
+			detachTagFromDS(ctx, tagID, sharedDatastoreURL)
+		}()
+
+		ginkgo.By("Create Tag Based policy with shared datastores")
+		policyID, policyName := createTagBasedPolicy(
+			ctx, pc, map[string]string{categoryName: tagName})
+		defer func() {
+			deleteStoragePolicy(ctx, pc, policyID)
+		}()
+
+		ginkgo.By("Create Storageclass from the policy created")
+		scParameters[scParamStoragePolicyName] = policyName
+		sc, err := createStorageClass(client, scParameters,
+			nil, "", "", true, "")
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		defer func() {
+			ginkgo.By("Delete the SC created")
+			err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0))
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		}()
+
+		ginkgo.By("Create PVCs using the Storageclass created")
+		pvcs = createMultiplePVCsInParallel(ctx, client, namespace, sc, pvcCount, nil)
+
+		defer func() {
+			for i := 0; i < pvcCount; i++ {
+				pv := getPvFromClaim(client, pvcs[i].Namespace, pvcs[i].Name)
+				err = fpv.DeletePersistentVolumeClaim(ctx, client, pvcs[i].Name, namespace)
+				gomega.Expect(err).NotTo(gomega.HaveOccurred())
+				framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, poll, pollTimeoutShort))
+				if !multivc {
+					err = e2eVSphere.waitForCNSVolumeToBeDeleted(pv.Spec.CSI.VolumeHandle)
+				} else {
+					err = multiVCe2eVSphere.waitForCNSVolumeToBeDeletedInMultiVC(pv.Spec.CSI.VolumeHandle)
+				}
+				gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			}
+		}()
+		ginkgo.By("Verify the PVCs created in step 2 are bound")
+		pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvcs, framework.ClaimProvisionTimeout)
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+		for i := 0; i < stsCount; i++ {
+			sts := createCustomisedStatefulSets(ctx, client, namespace, false, statefulSetReplicaCount,
+				false, nil, false, true, "sts"+strconv.Itoa(i), "", sc, "")
+			statefulsets = append(statefulsets, sts)
+		}
+		defer func() {
+			fss.DeleteAllStatefulSets(ctx, client, namespace)
+		}()
+
+		// Verify the volumes attached to the statefulset pods are on the source datastore
+		// and that their metadata in CNS is intact
+		for _, sts := range statefulsets {
+
+			ssPodsBeforeScaleDown := fss.GetPodList(ctx, client, sts)
+			for _, sspod := range ssPodsBeforeScaleDown.Items {
+				_, err := client.CoreV1().Pods(namespace).Get(ctx, sspod.Name, metav1.GetOptions{})
+				gomega.Expect(err).NotTo(gomega.HaveOccurred())
+				for _, volumespec := range sspod.Spec.Volumes {
+					if volumespec.PersistentVolumeClaim != nil {
+						pv := getPvFromClaim(client, sts.Namespace, volumespec.PersistentVolumeClaim.ClaimName)
+						ginkgo.By("Verify if VolumeID is created on the given datastores")
+						volumeID := pv.Spec.CSI.VolumeHandle
+						dsUrlWhereVolumeIsPresent := fetchDsUrl4CnsVol(e2eVSphere, volumeID)
+						framework.Logf("Volume: %s is present on %s", volumeID, dsUrlWhereVolumeIsPresent)
+						e2eVSphere.verifyDatastoreMatch(volumeID, []string{sharedDatastoreURL})
+						// Verify the attached volume matches the one in the CNS cache
+						if !multivc {
+							err := verifyVolumeMetadataInCNS(&e2eVSphere, pv.Spec.CSI.VolumeHandle,
+								volumespec.PersistentVolumeClaim.ClaimName, pv.ObjectMeta.Name, sspod.Name)
+							gomega.Expect(err).NotTo(gomega.HaveOccurred())
+						} else {
+							err := verifyVolumeMetadataInCNSForMultiVC(&multiVCe2eVSphere, pv.Spec.CSI.VolumeHandle,
+								volumespec.PersistentVolumeClaim.ClaimName, pv.ObjectMeta.Name, sspod.Name)
+							gomega.Expect(err).NotTo(gomega.HaveOccurred())
+						}
+					}
+				}
+			}
+		}
+
+		for i := 0; i < pvcCount; i++ {
+			volumeID := pvs[i].Spec.CSI.VolumeHandle
+
+			ginkgo.By("Verify that the created CNS volumes are compliant and have correct policy id")
+			storagePolicyMatches, err := e2eVSphere.VerifySpbmPolicyOfVolume(volumeID, policyName)
+			e2eVSphere.verifyVolumeCompliance(volumeID, true)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			gomega.Expect(storagePolicyMatches).To(gomega.BeTrue(), "storage policy verification failed")
+
+			ginkgo.By("Verify if VolumeID is created on the given datastores")
+			dsUrlWhereVolumeIsPresent := fetchDsUrl4CnsVol(e2eVSphere, volumeID)
+			framework.Logf("Volume: %s is present on %s", volumeID, dsUrlWhereVolumeIsPresent)
+			e2eVSphere.verifyDatastoreMatch(volumeID, []string{sharedDatastoreURL})
+		}
+
+		ginkgo.By("Storage vMotion the k8s node VMs to another datastore of a different cluster")
+		k8sNodeIpList := getK8sNodeIPs(ctx, client)
+		k8sNodeVmRef := getK8sVmMos(ctx, client, k8sNodeIpList)
+
+		for _, vm := range k8sNodeVmRef {
+			e2eVSphere.svmotionVM2DiffDsOfDiffCluster(ctx, object.NewVirtualMachine(e2eVSphere.Client.Client, vm.Reference()),
+				sharedDatastore2URL, "cluster2")
+			migratedVms = append(migratedVms, vm)
+		}
+
+		attachTagToDS(ctx, tagID, sharedDatastore2URL)
+		defer func() {
+			detachTagFromDS(ctx, tagID, sharedDatastore2URL)
+		}()
+
+		// Verify the attached volumes moved with the node VMs to the destination datastore
+		for _, sts := range statefulsets {
+
+			ssPodsBeforeScaleDown := fss.GetPodList(ctx, client, sts)
+			for _, sspod := range ssPodsBeforeScaleDown.Items {
+				_, err := client.CoreV1().Pods(namespace).Get(ctx, sspod.Name, metav1.GetOptions{})
+				gomega.Expect(err).NotTo(gomega.HaveOccurred())
+				for _, volumespec := range sspod.Spec.Volumes {
+					if volumespec.PersistentVolumeClaim != nil {
+						pv := getPvFromClaim(client, sts.Namespace, volumespec.PersistentVolumeClaim.ClaimName)
+						ginkgo.By("Verify if VolumeID is created on the given datastores")
+						volumeID := pv.Spec.CSI.VolumeHandle
+						dsUrlWhereVolumeIsPresent := fetchDsUrl4CnsVol(e2eVSphere, volumeID)
+						framework.Logf("Volume: %s is present on %s", volumeID, dsUrlWhereVolumeIsPresent)
+						e2eVSphere.verifyDatastoreMatch(volumeID, []string{sharedDatastore2URL})
+						// Verify the attached volume matches the one in the CNS cache
+						if !multivc {
+							err := verifyVolumeMetadataInCNS(&e2eVSphere, pv.Spec.CSI.VolumeHandle,
+								volumespec.PersistentVolumeClaim.ClaimName, pv.ObjectMeta.Name, sspod.Name)
+							gomega.Expect(err).NotTo(gomega.HaveOccurred())
+						} else {
+							err := verifyVolumeMetadataInCNSForMultiVC(&multiVCe2eVSphere, pv.Spec.CSI.VolumeHandle,
+								volumespec.PersistentVolumeClaim.ClaimName, pv.ObjectMeta.Name, sspod.Name)
+							gomega.Expect(err).NotTo(gomega.HaveOccurred())
+						}
+					}
+				}
+			}
+		}
+
+		for i := 0; i < pvcCount; i++ {
+			volumeID := pvs[i].Spec.CSI.VolumeHandle
+			ginkgo.By("Relocate volume from one shared datastore to another datastore using " +
+				"CnsRelocateVolume API")
+			dsRefDest := getDsMoRefFromURL(ctx, sharedDatastore2URL)
+			_, err = e2eVSphere.cnsRelocateVolume(e2eVSphere, ctx, volumeID, dsRefDest)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			e2eVSphere.verifyDatastoreMatch(volumeID, []string{sharedDatastore2URL})
+
+			ginkgo.By("Verify that the relocated CNS volumes are compliant and have correct policy id")
+			storagePolicyMatches, err := e2eVSphere.VerifySpbmPolicyOfVolume(volumeID, policyName)
+			e2eVSphere.verifyVolumeCompliance(volumeID, true)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			gomega.Expect(storagePolicyMatches).To(gomega.BeTrue(), "storage policy verification failed")
+		}
+
+	})
 })
diff --git a/tests/e2e/util.go b/tests/e2e/util.go
index 3e6a2406de..992d18ee2f 100644
--- a/tests/e2e/util.go
+++ b/tests/e2e/util.go
@@ -7017,5 +7017,37 @@ func removeStoragePolicyQuota(ctx context.Context, restClientConfig *rest.Config
 		pkgtypes.NamespacedName{Name: scName + storagePolicyQuota, Namespace: namespace}, spq)
 	gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	framework.Logf("Quota after removing: %s", spq.Spec.Limit)
+}
+
+// getK8sNodeIPs returns the external IPv4 addresses of the k8s nodes in a vanilla setup.
+func getK8sNodeIPs(ctx context.Context, client clientset.Interface) []string {
+	var err error
+	nodes, err := client.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	var k8sNodeIPs []string
+	for _, node := range nodes.Items {
+		addrs := node.Status.Addresses
+		for _, addr := range addrs {
+			if addr.Type == v1.NodeExternalIP && (net.ParseIP(addr.Address)).To4() != nil {
+				k8sNodeIPs = append(k8sNodeIPs, addr.Address)
+			}
+		}
+	}
+	gomega.Expect(k8sNodeIPs).NotTo(gomega.BeEmpty(), "Unable to find k8s node IP")
+	return k8sNodeIPs
+}
+
+// getK8sVmMos returns the VM managed object references corresponding to the given k8s node IPs.
+func getK8sVmMos(ctx context.Context, client clientset.Interface, k8sNodeIpList []string) []vim25types.ManagedObjectReference {
+	vmIp2MoRefMap := vmIpToMoRefMap(ctx)
+	k8sNodeVmRef := []vim25types.ManagedObjectReference{}
+	for _, ip := range k8sNodeIpList {
+		k8sNodeVmRef = append(k8sNodeVmRef, vmIp2MoRefMap[ip])
+	}
+	return k8sNodeVmRef
+}
diff --git a/tests/e2e/vsphere.go b/tests/e2e/vsphere.go
index 61a4240179..ed19a79020 100644
--- a/tests/e2e/vsphere.go
+++ b/tests/e2e/vsphere.go
@@ -1345,6 +1345,66 @@ func (vs *vSphere) getDsByUrl(ctx context.Context, datastoreURL string) mo.Datas
 	return dsMo
 }
+
+// getResourcePoolList returns the resource pool managed object of the given cluster.
+func (vs *vSphere) getResourcePoolList(ctx context.Context, clusterName string) mo.ResourcePool {
+	finder := find.NewFinder(vs.Client.Client, false)
+	dcString := e2eVSphere.Config.Global.Datacenters
+	dc, err := finder.Datacenter(ctx, dcString)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	finder.SetDatacenter(dc)
+	resourcePools, err := finder.ResourcePoolList(ctx, "*")
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	framework.Logf("resourcePoolList: %v", resourcePools)
+	var targetRsPoolMo mo.ResourcePool
+	for _, rsPool := range resourcePools {
+		rsPath := rsPool.InventoryPath
+		framework.Logf("rsPath: %v", rsPath)
+		// The inventory path of a cluster's resource pool looks like
+		// /<datacenter>/host/<cluster>/Resources, so match on the cluster name segment.
+		if strings.Contains(rsPath, "/"+clusterName+"/") {
+			err = rsPool.Properties(ctx, rsPool.Reference(), nil, &targetRsPoolMo)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			framework.Logf("Found resource pool %v for cluster %s", rsPool.Reference(), clusterName)
+			break
+		}
+	}
+	return targetRsPoolMo
+}
+
+// svmotionVM2DiffDsOfDiffCluster relocates the given VM to the destination datastore and to the
+// resource pool of the destination cluster, and waits for the relocation task to complete.
+func (vs *vSphere) svmotionVM2DiffDsOfDiffCluster(ctx context.Context, vm *object.VirtualMachine, destinationDsUrl string, clusterName string) {
+	dsMo := vs.getDsByUrl(ctx, destinationDsUrl)
+	dsref := dsMo.Reference()
+	rsPoolMo := vs.getResourcePoolList(ctx, clusterName)
+	rsPoolRef := rsPoolMo.Reference()
+	framework.Logf("rsPoolMo: %v", rsPoolRef)
+
+	relocateSpec := vim25types.VirtualMachineRelocateSpec{}
+	relocateSpec.Datastore = &dsref
+	relocateSpec.Pool = &rsPoolRef
+	vmname, err := vm.ObjectName(ctx)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	framework.Logf("Starting relocation of vm %s to datastore %s of different cluster: %s", vmname, dsref.Value, clusterName)
+	task, err := vm.Relocate(ctx, relocateSpec, vim25types.VirtualMachineMovePriorityHighPriority)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	_, err = task.WaitForResultEx(ctx)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	framework.Logf("Relocation of vm %s to datastore %s of cluster %s completed successfully", vmname, dsref.Value, clusterName)
+}
+
 func (vs *vSphere) getAllVms(ctx context.Context) []*object.VirtualMachine {
 	finder := find.NewFinder(vs.Client.Client, false)
 	dcString := e2eVSphere.Config.Global.Datacenters