[WIP] svmotion of volumes sanity testcases #2942

Open · wants to merge 1 commit into base: master
1 change: 1 addition & 0 deletions tests/e2e/e2e_common.go
@@ -85,6 +85,7 @@ const (
envRegionZoneWithSharedDS = "TOPOLOGY_WITH_SHARED_DATASTORE"
envRemoteHCIDsUrl = "REMOTE_HCI_DS_URL"
envSharedDatastoreURL = "SHARED_VSPHERE_DATASTORE_URL"
envSharedDatastore2URL = "SHARED_VSPHERE_DATASTORE2_URL"
envSharedVVOLDatastoreURL = "SHARED_VVOL_DATASTORE_URL"
envSharedNFSDatastoreURL = "SHARED_NFS_DATASTORE_URL"
envSharedVMFSDatastoreURL = "SHARED_VMFS_DATASTORE_URL"
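For context, the new SHARED_VSPHERE_DATASTORE2_URL constant is consumed the same way as the existing shared-datastore variables; a minimal sketch, mirroring the new test in svmotion_volumes.go below, of reading it and skipping when the variable is unset:

sharedDatastore2URL := os.Getenv(envSharedDatastore2URL)
if sharedDatastore2URL == "" {
	ginkgo.Skip(fmt.Sprintf("Env %v is missing", envSharedDatastore2URL))
}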
49 changes: 49 additions & 0 deletions tests/e2e/storage_policy_utils.go
@@ -421,3 +421,52 @@ func createStoragePolicyWithSharedVmfsNVsand(ctx context.Context, pbmClient *pbm

return profileID, profileName
}

/* createVsanStoragePolicy creates a vSAN test policy with the given category/tag map
func createVsanStoragePolicy(ctx context.Context, pbmClient *pbm.Client, categoryTagMap map[string]string) (*pbmtypes.PbmProfileId, string) {
s1 := rand.NewSource(time.Now().UnixNano())
r1 := rand.New(s1)
profileName := fmt.Sprintf("vsan-policy-%v-%v", time.Now().UnixNano(), strconv.Itoa(r1.Intn(1000)))
pbmCreateSpec := pbm.CapabilityProfileCreateSpec{
Name: profileName,
Description: "VSAN test policy",
Category: "REQUIREMENT",
CapabilityList: []pbm.Capability{
{
ID: "hostFailuresToTolerate",
Namespace: "VSAN",
PropertyList: []pbm.Property{
{
ID: "hostFailuresToTolerate",
Value: "true",
Operator: "unset",
DataType: "anyType",
},
},
},
},
}
for k, v := range categoryTagMap {

pbmCreateSpec.CapabilityList = append(pbmCreateSpec.CapabilityList, pbm.Capability{
ID: k,
Namespace: "http://www.vmware.com/storage/tag",
PropertyList: []pbm.Property{
{
ID: "com.vmware.storage.tag." + k + ".property",
Value: v,
DataType: "set",
},
},
})
}
createSpecVSAN, err := pbm.CreateCapabilityProfileSpec(pbmCreateSpec)
gomega.Expect(err).NotTo(gomega.HaveOccurred())

profileID, err := pbmClient.CreateProfile(ctx, *createSpecVSAN)
gomega.Expect(err).NotTo(gomega.HaveOccurred())

framework.Logf("VSAN profile with id: %v and name: '%v' created", profileID.UniqueId, profileName)

return profileID, profileName
}*/
218 changes: 218 additions & 0 deletions tests/e2e/svmotion_volumes.go
@@ -35,6 +35,8 @@ import (
"github.com/vmware/govmomi/find"
"github.com/vmware/govmomi/object"
pbmtypes "github.com/vmware/govmomi/pbm/types"
vim25types "github.com/vmware/govmomi/vim25/types"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
@@ -44,6 +46,7 @@ import (
fnodes "k8s.io/kubernetes/test/e2e/framework/node"
fpod "k8s.io/kubernetes/test/e2e/framework/pod"
fpv "k8s.io/kubernetes/test/e2e/framework/pv"
fss "k8s.io/kubernetes/test/e2e/framework/statefulset"
admissionapi "k8s.io/pod-security-admission/api"
)

@@ -76,6 +79,7 @@ var _ = ginkgo.Describe("[csi-block-vanilla] [csi-block-vanilla-parallelized] Re
labelValue string
pvc10g string
pandoraSyncWaitTime int
migratedVms []vim25types.ManagedObjectReference
)
ginkgo.BeforeEach(func() {
bootstrap()
@@ -1384,4 +1388,218 @@ var _ = ginkgo.Describe("[csi-block-vanilla] [csi-block-vanilla-parallelized] Re
gomega.Expect(err).NotTo(gomega.HaveOccurred())

})

/* Migrate dynamic PVCs with no common host
STEPS:
1. Create a tag-based storage policy for a shared datastore and an SC using that policy.
2. Provision 10 PVCs of 1 GB each using the above storage class.
3. Create 3 statefulsets with 3 replicas each using the above storage class.
4. Wait for all volumes and application pods to be healthy.
5. Verify volumes are created on CNS by using the CNS query API and also check that metadata is pushed to CNS.
6. Storage vMotion the k8s node VMs from one vSphere cluster to a datastore in another vSphere cluster.
7. Migrate the detached volumes to the destination datastore using the CNS relocate volume API.
8. Continue writing into the attached volumes while the migration is in progress.
9. Wait and verify the volume migration is a success.
10. Apply labels to all PVCs and PVs and verify each volume's metadata is intact and its health reflects accessible and compliant.
11. Expand all volumes to a size of 10 GB.
12. Attach pods to the remaining PVCs created in step 2 and verify data can be read from and written to the volumes.
13. Scale up all statefulset replicas to 5.
14. Delete all the workloads created by the test.
15. Delete the storage class.
*/
ginkgo.It("Migrate Dynamic PVCs with no common host", func() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
pvcCount := 10
var statefulSetReplicaCount int32 = 3
stsCount := 3
var statefulsets []*appsv1.StatefulSet

sharedDatastoreURL := os.Getenv(envSharedDatastoreURL)
if sharedDatastoreURL == "" {
ginkgo.Skip(fmt.Sprintf("Env %v is missing", envSharedDatastoreURL))
}

sharedDatastore2URL := os.Getenv(envSharedDatastore2URL)
if sharedDatastore2URL == "" {
ginkgo.Skip(fmt.Sprintf("Env %v is missing", envSharedDatastore2URL))
}

govmomiClient := newClient(ctx, &e2eVSphere)
pc := newPbmClient(ctx, govmomiClient)
scParameters := make(map[string]string)
pvcs := []*v1.PersistentVolumeClaim{}

rand.New(rand.NewSource(time.Now().UnixNano()))
suffix := fmt.Sprintf("-%v-%v", time.Now().UnixNano(), rand.Intn(10000))
categoryName := "category" + suffix
tagName := "tag" + suffix

ginkgo.By("Creating tag and category to tag datastore")

catID, tagID := createCategoryNTag(ctx, categoryName, tagName)
defer func() {
deleteCategoryNTag(ctx, catID, tagID)
}()

ginkgo.By("Attaching tag to shared vmfs datastores")

attachTagToDS(ctx, tagID, sharedDatastoreURL)
defer func() {
detachTagFromDS(ctx, tagID, sharedDatastoreURL)
}()

ginkgo.By("Create Tag Based policy with shared datstores")
policyID, policyName := createTagBasedPolicy(
ctx, pc, map[string]string{categoryName: tagName})
defer func() {
deleteStoragePolicy(ctx, pc, policyID)
}()

ginkgo.By("Create Storageclass from the policy created")
scParameters[scParamStoragePolicyName] = policyName
sc, err := createStorageClass(client, scParameters,
nil, "", "", true, "")
gomega.Expect(err).NotTo(gomega.HaveOccurred())
defer func() {
ginkgo.By("Delete the SCs created")
err := client.StorageV1().StorageClasses().Delete(ctx, sc.Name, *metav1.NewDeleteOptions(0))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}()

ginkgo.By("Create Storageclass from the policy created")
pvcs = createMultiplePVCsInParallel(ctx, client, namespace, sc, pvcCount, nil)

defer func() {
for i := 0; i < pvcCount; i++ {
pv := getPvFromClaim(client, pvcs[i].Namespace, pvcs[i].Name)
err = fpv.DeletePersistentVolumeClaim(ctx, client, pvcs[i].Name, namespace)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, poll, pollTimeoutShort))
err = multiVCe2eVSphere.waitForCNSVolumeToBeDeletedInMultiVC(pv.Spec.CSI.VolumeHandle)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
}()
ginkgo.By("Verify the PVCs created in step 3 are bound")
pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, pvcs, framework.ClaimProvisionTimeout)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
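
// Create 3 statefulsets with 3 replicas each using the tag-based storage class.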

for i := 0; i < stsCount; i++ {
sts := createCustomisedStatefulSets(ctx, client, namespace, false, statefulSetReplicaCount,
false, nil, false, true, "sts"+strconv.Itoa(i), "", sc, "")
statefulsets = append(statefulsets, sts)
}
gomega.Expect(err).NotTo(gomega.HaveOccurred())
defer func() {
fss.DeleteAllStatefulSets(ctx, client, namespace)
}()

// Verify the volumes attached to the statefulset pods are on the source datastore and their metadata is present in CNS

for _, sts := range statefulsets {

ssPodsBeforeScaleDown := fss.GetPodList(ctx, client, sts)
for _, sspod := range ssPodsBeforeScaleDown.Items {
_, err := client.CoreV1().Pods(namespace).Get(ctx, sspod.Name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
for _, volumespec := range sspod.Spec.Volumes {
if volumespec.PersistentVolumeClaim != nil {
pv := getPvFromClaim(client, sts.Namespace, volumespec.PersistentVolumeClaim.ClaimName)
ginkgo.By("Verify if VolumeID is created on the given datastores")
volumeID := pv.Spec.CSI.VolumeHandle
dsUrlWhereVolumeIsPresent := fetchDsUrl4CnsVol(e2eVSphere, volumeID)
framework.Logf("Volume: %s is present on %s", volumeID, dsUrlWhereVolumeIsPresent)
e2eVSphere.verifyDatastoreMatch(volumeID, []string{sharedDatastoreURL})
// Verify the attached volume match the one in CNS cache
if !multivc {
err := verifyVolumeMetadataInCNS(&e2eVSphere, pv.Spec.CSI.VolumeHandle,
volumespec.PersistentVolumeClaim.ClaimName, pv.ObjectMeta.Name, sspod.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
} else {
err := verifyVolumeMetadataInCNSForMultiVC(&multiVCe2eVSphere, pv.Spec.CSI.VolumeHandle,
volumespec.PersistentVolumeClaim.ClaimName, pv.ObjectMeta.Name, sspod.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
}
}
}
}

for i := 0; i < pvcCount; i++ {
volumeID := pvs[i].Spec.CSI.VolumeHandle

ginkgo.By("Verify that the created CNS volumes are compliant and have correct policy id")
storagePolicyMatches, err := e2eVSphere.VerifySpbmPolicyOfVolume(volumeID, policyName)
e2eVSphere.verifyVolumeCompliance(volumeID, true)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(storagePolicyMatches).To(gomega.BeTrue(), "storage policy verification failed")

ginkgo.By("Verify if VolumeID is created on the given datastores")
dsUrlWhereVolumeIsPresent := fetchDsUrl4CnsVol(e2eVSphere, volumeID)
framework.Logf("Volume: %s is present on %s", volumeID, dsUrlWhereVolumeIsPresent)
e2eVSphere.verifyDatastoreMatch(volumeID, []string{sharedDatastoreURL})

}

ginkgo.By("storage vmotion K8s node VMs to another datastore of a different cluster")
k8sNodeIpList := getK8sNodeIPs(ctx, client)
k8sNodeVmRef := getK8sVmMos(ctx, client, k8sNodeIpList)
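
// Storage vMotion of a node VM moves its attached volumes along with it, so after the
// migration the attached statefulset volumes are verified against sharedDatastore2URL.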

for _, vm := range k8sNodeVmRef {
e2eVSphere.svmotionVM2DiffDsOfDiffCluster(ctx, object.NewVirtualMachine(e2eVSphere.Client.Client, vm.Reference()),
sharedDatastore2URL, "cluster2")
migratedVms = append(migratedVms, vm)
}
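
// Tag the destination datastore as well so that the migrated and relocated volumes
// stay compliant with the tag-based storage policy.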

attachTagToDS(ctx, tagID, sharedDatastore2URL)
defer func() {
detachTagFromDS(ctx, tagID, sharedDatastore2URL)
}()
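
// After the storage vMotion of the node VMs, the volumes attached to the statefulset
// pods are expected to reside on the destination datastore; re-verify their placement
// and CNS metadata.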

for _, sts := range statefulsets {

ssPodsBeforeScaleDown := fss.GetPodList(ctx, client, sts)
for _, sspod := range ssPodsBeforeScaleDown.Items {
_, err := client.CoreV1().Pods(namespace).Get(ctx, sspod.Name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
for _, volumespec := range sspod.Spec.Volumes {
if volumespec.PersistentVolumeClaim != nil {
pv := getPvFromClaim(client, sts.Namespace, volumespec.PersistentVolumeClaim.ClaimName)
ginkgo.By("Verify if VolumeID is created on the given datastores")
volumeID := pv.Spec.CSI.VolumeHandle
dsUrlWhereVolumeIsPresent := fetchDsUrl4CnsVol(e2eVSphere, volumeID)
framework.Logf("Volume: %s is present on %s", volumeID, dsUrlWhereVolumeIsPresent)
e2eVSphere.verifyDatastoreMatch(volumeID, []string{sharedDatastore2URL})
// Verify the attached volume match the one in CNS cache
if !multivc {
err := verifyVolumeMetadataInCNS(&e2eVSphere, pv.Spec.CSI.VolumeHandle,
volumespec.PersistentVolumeClaim.ClaimName, pv.ObjectMeta.Name, sspod.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
} else {
err := verifyVolumeMetadataInCNSForMultiVC(&multiVCe2eVSphere, pv.Spec.CSI.VolumeHandle,
volumespec.PersistentVolumeClaim.ClaimName, pv.ObjectMeta.Name, sspod.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
}
}
}
}

for i := 0; i < pvcCount; i++ {
volumeID := pvs[i].Spec.CSI.VolumeHandle
ginkgo.By("Relocate volume from one shared datastore to another datastore using" +
"CnsRelocateVolume API")
dsRefDest := getDsMoRefFromURL(ctx, sharedDatastore2URL)
_, err = e2eVSphere.cnsRelocateVolume(e2eVSphere, ctx, volumeID, dsRefDest)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
e2eVSphere.verifyDatastoreMatch(volumeID, []string{sharedDatastore2URL})

ginkgo.By("Verify that the relocated CNS volumes are compliant and have correct policy id")
storagePolicyMatches, err := e2eVSphere.VerifySpbmPolicyOfVolume(volumeID, policyName)
e2eVSphere.verifyVolumeCompliance(volumeID, true)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(storagePolicyMatches).To(gomega.BeTrue(), "storage policy verification failed")
}

})
})
32 changes: 32 additions & 0 deletions tests/e2e/util.go
@@ -7017,5 +7017,37 @@ func removeStoragePolicyQuota(ctx context.Context, restClientConfig *rest.Config
pkgtypes.NamespacedName{Name: scName + storagePolicyQuota, Namespace: namespace}, spq)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("Quota after removing: %s", spq.Spec.Limit)
}

// getK8sNodeIPs returns the external IPv4 addresses of the k8s nodes in a vanilla setup.
func getK8sNodeIPs(ctx context.Context, client clientset.Interface) []string {
var err error
nodes, err := client.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
var k8sNodeIPs []string
for _, node := range nodes.Items {
addrs := node.Status.Addresses
for _, addr := range addrs {
if addr.Type == v1.NodeExternalIP && (net.ParseIP(addr.Address)).To4() != nil {
k8sNodeIPs = append(k8sNodeIPs, addr.Address)
}
}
}
gomega.Expect(k8sNodeIPs).NotTo(gomega.BeEmpty(), "Unable to find k8s node IP")
return k8sNodeIPs
}

// getK8sVmMos maps the given k8s node IPs to their VM managed object references.
func getK8sVmMos(ctx context.Context, client clientset.Interface, k8sNodeIpList []string) []vim25types.ManagedObjectReference {
vmIp2MoRefMap := vmIpToMoRefMap(ctx)

k8sNodeVmRef := []vim25types.ManagedObjectReference{}

for _, ip := range k8sNodeIpList {
k8sNodeVmRef = append(k8sNodeVmRef, vmIp2MoRefMap[ip])
}

return k8sNodeVmRef
}
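
The two helpers above are intended to be chained: collect the node IPs, map them to VM
managed object references, and storage vMotion each VM. A minimal usage sketch based on
the new test (vmIpToMoRefMap, svmotionVM2DiffDsOfDiffCluster, and their arguments are
taken from this PR, not a new API):

// Sketch: move all k8s node VMs to a destination datastore in another cluster.
nodeIPs := getK8sNodeIPs(ctx, client)
nodeVMs := getK8sVmMos(ctx, client, nodeIPs)
for _, vmRef := range nodeVMs {
	vm := object.NewVirtualMachine(e2eVSphere.Client.Client, vmRef.Reference())
	e2eVSphere.svmotionVM2DiffDsOfDiffCluster(ctx, vm, sharedDatastore2URL, "cluster2")
}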