Merge pull request #657 from sunpa93/filesystem-resize
fix: resize filesystem if cloned volume capacity is larger than the source volume
andyzhangx authored Jan 15, 2021
2 parents 1b85c49 + fc97c59 commit 19d1497
Showing 5 changed files with 217 additions and 11 deletions.
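At a high level, the change has two halves: CreateVolume compares the source disk size against the requested capacity and records a resizeRequired flag in the volume parameters, and NodeStageVolume grows the filesystem when that flag arrives in the volume context. The standalone Go sketch below illustrates that handshake; the helper names and the plain map standing in for the volume context are illustrative only and are not part of the driver.

```go
package main

import (
	"fmt"
	"strconv"
)

// markResizeRequired mimics what CreateVolume does in this commit: record in the
// volume parameters whether the clone requested more capacity than its source disk has.
func markResizeRequired(parameters map[string]string, sourceGiB, requestGiB int32) {
	parameters["resizeRequired"] = strconv.FormatBool(sourceGiB < requestGiB)
}

// needsResize mimics the check NodeStageVolume performs on the volume context.
func needsResize(volumeContext map[string]string) bool {
	return volumeContext["resizeRequired"] == "true"
}

func main() {
	params := map[string]string{}
	markResizeRequired(params, 10, 20) // cloning a 10GiB disk into a 20GiB volume
	fmt.Println(needsResize(params))   // true
}
```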
46 changes: 41 additions & 5 deletions pkg/azuredisk/controllerserver.go
@@ -19,6 +19,7 @@ package azuredisk
import (
"context"
"fmt"
"path"
"strconv"
"strings"

@@ -63,11 +64,13 @@
)

const (
sourceSnapshot = "snapshot"
sourceVolume = "volume"
azureDiskCSIDriverName = "azuredisk_csi_driver"
NotFound = "NotFound"
CreatedForPVNameKey = "kubernetes.io-created-for-pv-name"
sourceSnapshot = "snapshot"
sourceVolume = "volume"
azureDiskCSIDriverName = "azuredisk_csi_driver"
NotFound = "NotFound"
CreatedForPVNameKey = "kubernetes.io-created-for-pv-name"
resizeRequired = "resizeRequired"
sourceDiskSearchMaxDepth = 10
)

// CreateVolume provisions an azure disk
@@ -160,6 +163,7 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest)
//return nil, fmt.Errorf("AzureDisk - invalid option %s in storage class", k)
}
}
parameters[resizeRequired] = strconv.FormatBool(false)

if IsAzureStackCloud(d.cloud) {
if maxShares > 1 {
@@ -258,6 +262,11 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest)
},
}

ctx, cancel := context.WithCancel(context.Background())
if sourceGiB, _ := d.GetSourceDiskSize(ctx, resourceGroup, path.Base(sourceID), 0, sourceDiskSearchMaxDepth); sourceGiB != nil && *sourceGiB < int32(requestGiB) {
parameters[resizeRequired] = strconv.FormatBool(true)
}
cancel()
}
}

@@ -796,6 +805,33 @@ func (d *Driver) getSnapshotByID(ctx context.Context, resourceGroup, snapshotID,
return generateCSISnapshot(sourceVolumeID, &snapshot)
}

// GetSourceDiskSize recursively searches for the source disk and returns its size in GiB, or an error
func (d *Driver) GetSourceDiskSize(ctx context.Context, resourceGroup, diskName string, curDepth, maxDepth int) (*int32, error) {
if curDepth > maxDepth {
return nil, status.Error(codes.Internal, fmt.Sprintf("current depth (%d) surpassed the max depth (%d) while searching for the source disk size", curDepth, maxDepth))
}
result, rerr := d.cloud.DisksClient.Get(ctx, resourceGroup, diskName)
if rerr != nil {
return nil, rerr.Error()
}
if result.DiskProperties == nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("DiskProperty not found for disk (%s) in resource group (%s)", diskName, resourceGroup))
}

if result.DiskProperties.CreationData != nil && (*result.DiskProperties.CreationData).CreateOption == "Copy" {
klog.V(2).Infof("Clone source disk has a parent source")
sourceResourceID := *result.DiskProperties.CreationData.SourceResourceID
parentResourceGroup, _ := GetResourceGroupFromURI(sourceResourceID)
parentDiskName := path.Base(sourceResourceID)
return d.GetSourceDiskSize(ctx, parentResourceGroup, parentDiskName, curDepth+1, maxDepth)
}

if (*result.DiskProperties).DiskSizeGB == nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("DiskSizeGB for disk (%s) in resourcegroup (%s) is nil", diskName, resourceGroup))
}
return (*result.DiskProperties).DiskSizeGB, nil
}

func isValidVolumeCapabilities(volCaps []*csi.VolumeCapability) bool {
hasSupport := func(cap *csi.VolumeCapability) bool {
for _, c := range volumeCaps {
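The new GetSourceDiskSize helper walks a chain of copied disks: if the disk it fetches was itself created with CreateOption "Copy", it recurses into the parent, giving up once the depth limit is exceeded. Below is a minimal standalone sketch of that depth-limited recursion, using an in-memory map in place of the Azure DisksClient; the diskInfo type and its field names are made up for illustration.

```go
package main

import (
	"errors"
	"fmt"
)

// diskInfo is a stand-in for the Azure disk model; only the fields relevant
// to the search are kept.
type diskInfo struct {
	sizeGiB    int32
	copiedFrom string // non-empty when the disk was created with CreateOption "Copy"
}

// sourceDiskSize mirrors the recursion in GetSourceDiskSize: follow the Copy
// chain until a disk with no parent is found, or fail once maxDepth is exceeded.
func sourceDiskSize(disks map[string]diskInfo, name string, curDepth, maxDepth int) (int32, error) {
	if curDepth > maxDepth {
		return 0, fmt.Errorf("current depth (%d) surpassed the max depth (%d)", curDepth, maxDepth)
	}
	d, ok := disks[name]
	if !ok {
		return 0, errors.New("disk not found: " + name)
	}
	if d.copiedFrom != "" {
		return sourceDiskSize(disks, d.copiedFrom, curDepth+1, maxDepth)
	}
	return d.sizeGiB, nil
}

func main() {
	disks := map[string]diskInfo{
		"clone":  {sizeGiB: 20, copiedFrom: "source"},
		"source": {sizeGiB: 10},
	}
	size, err := sourceDiskSize(disks, "clone", 0, 10)
	fmt.Println(size, err) // 10 <nil>
}
```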
102 changes: 102 additions & 0 deletions pkg/azuredisk/controllerserver_test.go
@@ -1784,3 +1784,105 @@ func TestPickAvailabilityZone(t *testing.T) {
t.Run(tc.name, tc.testFunc)
}
}

func TestGetSourceDiskSize(t *testing.T) {
testCases := []struct {
name string
testFunc func(t *testing.T)
}{
{
name: "max depth reached",
testFunc: func(t *testing.T) {
d, _ := NewFakeDriver(t)
_, err := d.GetSourceDiskSize(context.Background(), "test-rg", "test-disk", 2, 1)
expectedErr := status.Errorf(codes.Internal, "current depth (2) surpassed the max depth (1) while searching for the source disk size")
if !reflect.DeepEqual(err, expectedErr) {
t.Errorf("actualErr: (%v), expectedErr: (%v)", err, expectedErr)
}
},
},
{
name: "diskproperty not found",
testFunc: func(t *testing.T) {
d, _ := NewFakeDriver(t)
disk := compute.Disk{}
d.cloud.DisksClient.(*mockdiskclient.MockInterface).EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes()
_, err := d.GetSourceDiskSize(context.Background(), "test-rg", "test-disk", 0, 1)
expectedErr := status.Error(codes.Internal, "DiskProperty not found for disk (test-disk) in resource group (test-rg)")
if !reflect.DeepEqual(err, expectedErr) {
t.Errorf("actualErr: (%v), expectedErr: (%v)", err, expectedErr)
}
},
},
{
name: "nil DiskSizeGB",
testFunc: func(t *testing.T) {
d, _ := NewFakeDriver(t)
diskProperties := compute.DiskProperties{}
disk := compute.Disk{
DiskProperties: &diskProperties,
}
d.cloud.DisksClient.(*mockdiskclient.MockInterface).EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes()
_, err := d.GetSourceDiskSize(context.Background(), "test-rg", "test-disk", 0, 1)
expectedErr := status.Error(codes.Internal, "DiskSizeGB for disk (test-disk) in resourcegroup (test-rg) is nil")
if !reflect.DeepEqual(err, expectedErr) {
t.Errorf("actualErr: (%v), expectedErr: (%v)", err, expectedErr)
}
},
},
{
name: "successful search: depth 1",
testFunc: func(t *testing.T) {
d, _ := NewFakeDriver(t)
diskSizeGB := int32(8)
diskProperties := compute.DiskProperties{
DiskSizeGB: &diskSizeGB,
}
disk := compute.Disk{
DiskProperties: &diskProperties,
}
d.cloud.DisksClient.(*mockdiskclient.MockInterface).EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk, nil).AnyTimes()
size, _ := d.GetSourceDiskSize(context.Background(), "test-rg", "test-disk", 0, 1)
expectedOutput := diskSizeGB
if *size != expectedOutput {
t.Errorf("actualOutput: (%v), expectedOutput: (%v)", *size, expectedOutput)
}
},
},
{
name: "successful serach: depth 2",
testFunc: func(t *testing.T) {
d, _ := NewFakeDriver(t)
diskSizeGB1 := int32(16)
diskSizeGB2 := int32(8)
sourceURI := "/subscriptions/xxxxxxxx/resourcegroups/test-rg/providers/microsoft.compute/disks/test-disk-1"
creationData := compute.CreationData{
CreateOption: "Copy",
SourceURI: &sourceURI,
}
diskProperties1 := compute.DiskProperties{
CreationData: &creationData,
DiskSizeGB: &diskSizeGB1,
}
diskProperties2 := compute.DiskProperties{
DiskSizeGB: &diskSizeGB2,
}
disk1 := compute.Disk{
DiskProperties: &diskProperties1,
}
disk2 := compute.Disk{
DiskProperties: &diskProperties2,
}
d.cloud.DisksClient.(*mockdiskclient.MockInterface).EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(disk1, nil).Return(disk2, nil).AnyTimes()
size, _ := d.GetSourceDiskSize(context.Background(), "test-rg", "test-disk-1", 0, 2)
expectedOutput := diskSizeGB2
if *size != expectedOutput {
t.Errorf("actualOutput: (%v), expectedOutput: (%v)", *size, expectedOutput)
}
},
},
}
for _, tc := range testCases {
t.Run(tc.name, tc.testFunc)
}
}
34 changes: 32 additions & 2 deletions pkg/azuredisk/nodeserver.go
@@ -131,6 +131,21 @@ func (d *Driver) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRe
}
klog.V(2).Infof("NodeStageVolume: format %s and mounting at %s successfully.", source, target)

// if resize is required, resize filesystem
if required, ok := req.GetVolumeContext()[resizeRequired]; ok && required == "true" {
klog.V(2).Infof("NodeStageVolume: fs resize initiating on target(%s) volumeid(%s)", target, diskURI)
if err != nil {
return nil, status.Errorf(codes.Internal, "NodeStageVolume: Could not get volume path for %s: %v", target, err)
}

resizer := resizefs.NewResizeFs(d.mounter)
if _, err := resizer.Resize(source, target); err != nil {
return nil, status.Errorf(codes.Internal, "NodeStageVolume: Could not resize volume %q (%q): %v", diskURI, source, err)
}

klog.V(2).Infof("NodeStageVolume: fs resize successful on target(%s) volumeid(%s).", target, diskURI)
}

return &csi.NodeStageVolumeResponse{}, nil
}

@@ -216,6 +231,7 @@ func (d *Driver) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolu
if err := d.mounter.Mount(source, target, "", mountOptions); err != nil {
return nil, status.Errorf(codes.Internal, "Could not mount %q at %q: %v", source, target, err)
}

klog.V(2).Infof("NodePublishVolume: mount %s at %s successfully", source, target)

return &csi.NodePublishVolumeResponse{}, nil
@@ -409,8 +425,7 @@ func (d *Driver) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolume
// In kubernetes 1.19 above, it will be passed by kubelet.
devicePath := req.GetStagingTargetPath()
if devicePath == "" {
args := []string{"-o", "source", "--noheadings", "--mountpoint", volumePath}
output, err := d.mounter.Exec.Command("findmnt", args...).Output()
output, err := d.getDevicePathWithMountPath(volumePath)
if err != nil {
return nil, status.Errorf(codes.NotFound, "Could not determine device path: %v", err)
}
@@ -543,6 +558,21 @@ func (d *Driver) getDevicePathWithLUN(lunStr string) (string, error) {
return newDevicePath, err
}

func (d *Driver) getDevicePathWithMountPath(mountPath string) (string, error) {
args := []string{"-o", "source", "--noheadings", "--mountpoint", mountPath}
output, err := d.mounter.Exec.Command("findmnt", args...).Output()
if err != nil {
return "", status.Errorf(codes.Internal, "Could not determine device path: %v", err)
}

devicePath := strings.TrimSpace(string(output))
if len(devicePath) == 0 {
return "", status.Errorf(codes.Internal, "Could not get valid device for mount path: %q", mountPath)
}

return devicePath, nil
}

func (d *Driver) getBlockSizeBytes(devicePath string) (int64, error) {
output, err := d.mounter.Exec.Command("blockdev", "--getsize64", devicePath).Output()
if err != nil {
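The factored-out getDevicePathWithMountPath helper shells out to findmnt and treats empty output as an error. Here is a self-contained approximation of the same idea, assuming the util-linux findmnt binary is on PATH; the function name and error wording are illustrative, not the driver's.

```go
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// devicePathForMount asks findmnt for the source device backing a mount point
// and rejects empty output, mirroring the helper added in nodeserver.go.
func devicePathForMount(mountPath string) (string, error) {
	out, err := exec.Command("findmnt", "-o", "source", "--noheadings", "--mountpoint", mountPath).Output()
	if err != nil {
		return "", fmt.Errorf("could not determine device path: %v", err)
	}
	device := strings.TrimSpace(string(out))
	if device == "" {
		return "", fmt.Errorf("could not get valid device for mount path %q", mountPath)
	}
	return device, nil
}

func main() {
	device, err := devicePathForMount("/")
	fmt.Println(device, err)
}
```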
39 changes: 35 additions & 4 deletions test/e2e/dynamic_provisioning_test.go
@@ -350,10 +350,6 @@ func (t *dynamicProvisioningTestSuite) defineTests(isMultiZone bool) {
ginkgo.It("should clone a volume from an existing volume and read from it [disk.csi.azure.com]", func() {
skipIfTestingInWindowsCluster()
skipIfUsingInTreeVolumePlugin()
if !isMultiZone {
// todo: remove this when single-az tests are runnong on 1.16
ginkgo.Skip("test case not supported by single-az test since it's running on 1.15")
}

pod := testsuites.PodDetails{
Cmd: "echo 'hello world' > /mnt/test-1/data",
@@ -383,6 +379,41 @@ func (t *dynamicProvisioningTestSuite) defineTests(isMultiZone bool) {
test.Run(cs, ns)
})

ginkgo.It("should clone a volume of larger size than the source volume and make sure the filesystem is appropriately adjusted [disk.csi.azure.com]", func() {
skipIfTestingInWindowsCluster()
skipIfUsingInTreeVolumePlugin()

pod := testsuites.PodDetails{
Volumes: t.normalizeVolumes([]testsuites.VolumeDetails{
{
FSType: "ext4",
ClaimSize: "10Gi",
VolumeMount: testsuites.VolumeMountDetails{
NameGenerate: "test-volume-",
MountPathGenerate: "/mnt/test-",
},
},
}, isMultiZone),
}
clonedVolumeSize := "20Gi"

podWithClonedVolume := testsuites.PodDetails{
Cmd: "df -h | grep /mnt/test- | awk '{print $2}' | grep 20.0G",
}

test := testsuites.DynamicallyProvisionedVolumeCloningTest{
CSIDriver: testDriver,
Pod: pod,
PodWithClonedVolume: podWithClonedVolume,
ClonedVolumeSize: clonedVolumeSize,
StorageClassParameters: map[string]string{
"skuName": "Standard_LRS",
"fsType": "xfs",
},
}
test.Run(cs, ns)
})

ginkgo.It("should create multiple PV objects, bind to PVCs and attach all to a single pod [kubernetes.io/azure-disk] [disk.csi.azure.com] [Windows]", func() {
pods := []testsuites.PodDetails{
{
@@ -25,10 +25,12 @@ import (
)

// DynamicallyProvisionedVolumeCloningTest will provision required StorageClass(es), PVC(s) and Pod(s)
// ClonedVolumeSize is optional; set it when testing a cloned volume whose size differs from the original volume
type DynamicallyProvisionedVolumeCloningTest struct {
CSIDriver driver.DynamicPVTestDriver
Pod PodDetails
PodWithClonedVolume PodDetails
ClonedVolumeSize string
StorageClassParameters map[string]string
}

@@ -57,6 +59,11 @@ func (t *DynamicallyProvisionedVolumeCloningTest) Run(client clientset.Interface
Kind: VolumePVCKind,
}
clonedVolume.StorageClass = tsc.storageClass

if t.ClonedVolumeSize != "" {
clonedVolume.ClaimSize = t.ClonedVolumeSize
}

t.PodWithClonedVolume.Volumes = []VolumeDetails{clonedVolume}
tpod, cleanups = t.PodWithClonedVolume.SetupWithDynamicVolumes(client, namespace, t.CSIDriver, t.StorageClassParameters)
for i := range cleanups {
