From 6701b1314c24e02922333ab488d5509f4d8873bf Mon Sep 17 00:00:00 2001 From: Stijn IJzermans Date: Mon, 12 Aug 2024 15:36:09 +0200 Subject: [PATCH] Allow backups to be made incrementally (#114) --- README.md | 8 ++ docs/installation-using-cli.md | 2 + docs/installation-using-helm.md | 2 + src/cinder/block_store.go | 67 ++++++++-- src/cinder/block_store_test.go | 226 ++++++++++++++++++++++++++++++++ 5 files changed, 293 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 61da73e..050e8f5 100644 --- a/README.md +++ b/README.md @@ -123,6 +123,14 @@ Manila backup methods: - **Snapshot** - Create a snapshot using Manila. - **Clone** - Create a snapshot using Manila, but immediatelly create a volume from this snapshot and afterwards cleanup original snapshot. +#### Incremental backups + +For backup method `backups`, incremental backups are supported. This will however lead to the inability to delete backups in a reverse order, since it is not possible to delete an (older) backup that has dependent (newer) backups. + +**Important: TTL is not working correctly with expiration, so set a large value such as (43800h, approx. 5 years)** since TTL inherently uses a `First In, First Out` algorithm, where incremental backups should be deleted on a `Last In, First Out` basis. To clean up these incremental backups, it's suggested to use the Velero CLI to perform deletion on this `Last In, First Out` basis. + +Next to that, when incremental backups are enabled, the first backup of a volume will always be a full backup, since this is needed to create increments on. + ### Consistency and Durability Please note two facts regarding volume backups: diff --git a/docs/installation-using-cli.md b/docs/installation-using-cli.md index 2ae0c1c..4d6ea66 100644 --- a/docs/installation-using-cli.md +++ b/docs/installation-using-cli.md @@ -83,6 +83,8 @@ spec: # deletes all dependent volume resources (i.e. 
snapshots) before deleting # the clone volume (works only, when a snapshot method is set to clone) cascadeDelete: "true" + # backups will be created incrementally (works only when snapshot method is set to backup) + incrementalBackup: "true" ``` For backups of Manila shares create another configuration of `volumesnapshotlocations.velero.io`: diff --git a/docs/installation-using-helm.md b/docs/installation-using-helm.md index 04f95db..7585f66 100644 --- a/docs/installation-using-helm.md +++ b/docs/installation-using-helm.md @@ -65,6 +65,8 @@ configuration: # deletes all dependent volume resources (i.e. snapshots) before deleting # the clone volume (works only, when a snapshot method is set to clone) cascadeDelete: "true" + # backups will be created incrementally (works only when snapshot method is set to backup) + incrementalBackup: "true" # for Manila shared filesystem storage - name: manila provider: community.openstack.org/openstack-manila diff --git a/src/cinder/block_store.go b/src/cinder/block_store.go index 028941c..09c0b5f 100644 --- a/src/cinder/block_store.go +++ b/src/cinder/block_store.go @@ -99,6 +99,7 @@ type BlockStore struct { cascadeDelete bool containerName string log logrus.FieldLogger + backupIncremental bool } // NewBlockStore instantiates a Cinder Volume Snapshotter. 
@@ -158,6 +159,10 @@ func (b *BlockStore) Init(config map[string]string) error { if err != nil { return fmt.Errorf("cannot parse cascadeDelete config variable: %w", err) } + b.backupIncremental, err = strconv.ParseBool(utils.GetConf(b.config, "backupIncremental", "false")) + if err != nil { + return fmt.Errorf("cannot parse backupIncremental config variable: %w", err) + } // load optional containerName b.containerName = utils.GetConf(b.config, "containerName", "") @@ -321,12 +326,13 @@ func (b *BlockStore) createVolumeFromClone(cloneID, volumeType, volumeAZ string) func (b *BlockStore) createVolumeFromBackup(backupID, volumeType, volumeAZ string) (string, error) { logWithFields := b.log.WithFields(logrus.Fields{ - "backupID": backupID, - "volumeType": volumeType, - "volumeAZ": volumeAZ, - "backupTimeout": b.backupTimeout, - "volumeTimeout": b.volumeTimeout, - "method": b.config["method"], + "backupID": backupID, + "volumeType": volumeType, + "volumeAZ": volumeAZ, + "backupTimeout": b.backupTimeout, + "backupIncremental": b.backupIncremental, + "volumeTimeout": b.volumeTimeout, + "method": b.config["method"], }) logWithFields.Info("BlockStore.CreateVolumeFromSnapshot called") @@ -588,12 +594,13 @@ func (b *BlockStore) createClone(volumeID, volumeAZ string, tags map[string]stri func (b *BlockStore) createBackup(volumeID, volumeAZ string, tags map[string]string) (string, error) { backupName := fmt.Sprintf("%s.backup.%s", volumeID, strconv.FormatUint(utils.Rand.Uint64(), 10)) logWithFields := b.log.WithFields(logrus.Fields{ - "backupName": backupName, - "volumeID": volumeID, - "volumeAZ": volumeAZ, - "tags": tags, - "backupTimeout": b.backupTimeout, - "method": b.config["method"], + "backupName": backupName, + "volumeID": volumeID, + "volumeAZ": volumeAZ, + "tags": tags, + "backupTimeout": b.backupTimeout, + "backupIncremental": b.backupIncremental, + "method": b.config["method"], }) logWithFields.Info("BlockStore.CreateSnapshot called") @@ -603,6 +610,20 @@ func (b 
*BlockStore) createBackup(volumeID, volumeAZ string, tags map[string]str return "", fmt.Errorf("failed to get volume %v from cinder: %w", volumeID, err) } + existingBackups, err := b.getVolumeBackups(logWithFields, volumeID) + if err != nil { + logWithFields.Error("failed to retrieve existing volume backups.") + return "", fmt.Errorf("failed to retrieve existing backups %v from cinder: %w", volumeID, err) + } + + var existingBackup *backups.Backup + for _, b := range existingBackups { + if b.VolumeID == volumeID && utils.SliceContains(backupStatuses, b.Status) { + existingBackup = &b + break + } + } + opts := &backups.CreateOpts{ Name: backupName, VolumeID: volumeID, @@ -610,6 +631,7 @@ func (b *BlockStore) createBackup(volumeID, volumeAZ string, tags map[string]str Container: backupName, Metadata: utils.Merge(originVolume.Metadata, tags), Force: true, + Incremental: b.backupIncremental, } // Override container if one was passed by the user @@ -617,6 +639,12 @@ func (b *BlockStore) createBackup(volumeID, volumeAZ string, tags map[string]str opts.Container = b.containerName } + // Disable incremental backup for volume where no backup exists yet. 
+ if b.backupIncremental && existingBackup == nil { + logWithFields.Infof("No backup exists yet for volume %s, will first run a full backup.", volumeID) + opts.Incremental = false + } + backup, err := backups.Create(b.client, opts).Extract() if err != nil { logWithFields.Error("failed to create backup from volume") @@ -1093,3 +1121,18 @@ func expandVolumeProperties(log logrus.FieldLogger, volume *volumes.Volume) imag } return imgAttrUpdateOpts } + +func (b *BlockStore) getVolumeBackups(logWithFields *logrus.Entry, volumeID string) ([]backups.Backup, error) { + // use detail and a non-volumeid search to allow usage of later microversions + pages, err := backups.ListDetail(b.client, nil).AllPages() + if err != nil { + return nil, fmt.Errorf("failed to list backups: %w", err) + } + + allBackups, err := backups.ExtractBackups(pages) + if err != nil { + return nil, fmt.Errorf("failed to extract backups: %w", err) + } + + return allBackups, nil +} diff --git a/src/cinder/block_store_test.go b/src/cinder/block_store_test.go index 210dbb5..3a003ad 100644 --- a/src/cinder/block_store_test.go +++ b/src/cinder/block_store_test.go @@ -1,6 +1,7 @@ package cinder import ( + "encoding/json" "fmt" "net/http" "testing" @@ -8,6 +9,7 @@ import ( th "github.com/gophercloud/gophercloud/testhelper" fakeClient "github.com/gophercloud/gophercloud/testhelper/client" "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" ) const ID = "0123456789" @@ -132,6 +134,151 @@ const tokenResp = `{ } } }` +const listDetailResponse = `{ + "backups": [ + { + "id": "289da7f8-6440-407c-9fb4-7db01ec49164", + "name": "backup-001", + "volume_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c", + "description": "Daily Backup", + "status": "available", + "size": 30, + "created_at": "2017-05-30T03:35:03.000000" + }, + { + "id": "96c3bda7-c82a-4f50-be73-ca7621794835", + "name": "backup-002", + "volume_id": "76b8950a-8594-4e5b-8dce-0dfa9c696358", + "description": "Weekly Backup", + "status": "available", + 
"size": 25, + "created_at": "2017-05-30T03:35:03.000000" + } + ], + "backups_links": [ + { + "href": "%s/backups/detail?marker=1", + "rel": "next" + } + ] +} +` +const createBackupResponse = `{ + "backup": { + "volume_id": "1234", + "name": "backup-001", + "id": "%s", + "description": "Daily backup", + "volume_id": "1234", + "status": "available", + "size": 30, + "created_at": "2017-05-30T03:35:03.000000" + } + } +` +const getBackupResponse = `{ + "backup": { + "volume_id": "1234", + "name": "backup-001", + "id": "%s", + "description": "Daily backup", + "volume_id": "1234", + "status": "available", + "size": 30, + "created_at": "2017-05-30T03:35:03.000000" + } +}` +const getVolumeResponse = `{ + "volume": { + "volume_type": "lvmdriver-1", + "created_at": "2015-09-17T03:32:29.000000", + "bootable": "false", + "name": "vol-001", + "os-vol-mig-status-attr:name_id": null, + "consistencygroup_id": null, + "source_volid": null, + "os-volume-replication:driver_data": null, + "multiattach": false, + "snapshot_id": null, + "replication_status": "disabled", + "os-volume-replication:extended_status": null, + "encrypted": false, + "availability_zone": "nova", + "attachments": [{ + "server_id": "83ec2e3b-4321-422b-8706-a84185f52a0a", + "attachment_id": "05551600-a936-4d4a-ba42-79a037c1-c91a", + "attached_at": "2016-08-06T14:48:20.000000", + "host_name": "foobar", + "volume_id": "%[1]s", + "device": "/dev/vdc", + "id": "d6cacb1a-8b59-4c88-ad90-d70ebb82bb75" + }], + "id": "%[1]s", + "size": 75, + "user_id": "ff1ce52c03ab433aaba9108c2e3ef541", + "os-vol-tenant-attr:tenant_id": "304dc00909ac4d0da6c62d816bcb3459", + "os-vol-mig-status-attr:migstat": null, + "metadata": {}, + "status": "available", + "volume_image_metadata": { + "container_format": "bare", + "image_name": "centos" + }, + "description": null + } +}` + +func handleListBackupsDetail(t *testing.T) { + th.Mux.HandleFunc("/backups/detail", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + 
th.TestHeader(t, r, "X-Auth-Token", fakeClient.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + if err := r.ParseForm(); err != nil { + t.Errorf("Failed to parse request form %v", err) + } + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, listDetailResponse, th.Server.URL) + case "1": + fmt.Fprintf(w, `{"backups": []}`) + default: + t.Fatalf("Unexpected marker: [%s]", marker) + } + }) +} + +func handleGetVolume(t *testing.T, volumeID string) { + th.Mux.HandleFunc(fmt.Sprintf("/volumes/%s", volumeID), func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fakeClient.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, getVolumeResponse, volumeID, volumeID) + }) +} + +func handleGetBackup(t *testing.T, backupID string) { + th.Mux.HandleFunc(fmt.Sprintf("/backups/%s", backupID), func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fakeClient.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, getBackupResponse, backupID) + }) +} + +// CreateIncrementalBackupRequest represents the top-level structure containing the Backup object. 
+type CreateIncrementalBackupRequest struct { + Backup struct { + Incremental bool `json:"incremental"` + } `json:"backup"` +} // TestInit performs standard block store initialization // which includes creation of auth client, authentication and @@ -169,3 +316,82 @@ func TestSimpleBlockStorageInit(t *testing.T) { t.Error(err) } } + +func TestGetVolumeBackups(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + handleListBackupsDetail(t) + store := BlockStore{ + client: fakeClient.ServiceClient(), + log: logrus.New(), + } + + volumeID := "76b8950a-8594-4e5b-8dce-0dfa9c696358" + logWithFields := store.log.WithFields(logrus.Fields{"volumeId": volumeID}) + allBackups, err := store.getVolumeBackups(logWithFields, volumeID) + + if !assert.Nil(t, err) { + t.FailNow() + } + + numOfBackups := len(allBackups) + if numOfBackups != 2 { + t.Errorf("Expected 2 backups, got %d", numOfBackups) + } +} + +func TestCreateBackup(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + backupID := "d32019d3-bc6e-4319-9c1d-6722fc136a22" + var createRequest *CreateIncrementalBackupRequest + + handleListBackupsDetail(t) + handleGetBackup(t, backupID) + + th.Mux.HandleFunc("/backups", func(w http.ResponseWriter, r *http.Request) { + // Reset createRequest for each request. 
+ createRequest = &CreateIncrementalBackupRequest{} + + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fakeClient.TokenID) + json.NewDecoder(r.Body).Decode(&createRequest) + + w.WriteHeader(http.StatusAccepted) + fmt.Fprintf(w, createBackupResponse, backupID) + }) + + store := BlockStore{ + client: fakeClient.ServiceClient(), + log: logrus.New(), + backupIncremental: true, + backupTimeout: 3, + } + + tests := []struct { + volumeID string + expectedIncrementalValue bool + }{ + {"521752a6-acf6-4b2d-bc7a-119f9148cd8c", true}, // volume with existing backups + {"591752a6-acf6-4b2d-bc7a-119f9148cd8c", false}, // volume without existing backups + } + + for _, tt := range tests { + handleGetVolume(t, tt.volumeID) + createdBackupID, err := store.createBackup(tt.volumeID, "default", map[string]string{}) + + if createRequest.Backup.Incremental != tt.expectedIncrementalValue { + t.Errorf("expected incremental backup to be set to %v, got %v", tt.expectedIncrementalValue, createRequest.Backup.Incremental) + } + + if createdBackupID != backupID { + t.Errorf("expected created backup ID to be %v, got %v", backupID, createdBackupID) + } + + if !assert.Nil(t, err) { + t.FailNow() + } + } +}