diff --git a/Makefile b/Makefile
index 2eb00f4911..42b558d27e 100644
--- a/Makefile
+++ b/Makefile
@@ -930,15 +930,9 @@ clean-ci: ## Cleanup orphaned objects in CI
 	@if [ -z "${GOVC_USERNAME}" ]; then echo "GOVC_USERNAME is not set"; exit 1; fi
 	@if [ -z "${GOVC_PASSWORD}" ]; then echo "GOVC_PASSWORD is not set"; exit 1; fi
 	@if [ -z "${GOVC_URL}" ]; then echo "GOVC_URL is not set"; exit 1; fi
-	go run $(JANITOR_DIR) \
-		--dry-run=false \
-		--max-age=12h \
-		--ipam-namespace=default \
-		--folder=/SDDC-Datacenter/vm/Workloads/cluster-api-provider-vsphere \
-		--resource-pool=/SDDC-Datacenter/host/Cluster-1/Resources/Compute-ResourcePool/cluster-api-provider-vsphere \
-		--vm-folder=/SDDC-Datacenter/vm/Workloads/cluster-api-provider-vsphere \
-		--vm-folder=/SDDC-Datacenter/vm/Workloads/cloud-provider-vsphere \
-		--vm-folder=/SDDC-Datacenter/vm/Workloads/image-builder
+	@if [ -z "${VSPHERE_TLS_THUMBPRINT}" ]; then echo "VSPHERE_TLS_THUMBPRINT is not set"; exit 1; fi
+	@if [ -z "${BOSKOS_HOST}" ]; then echo "BOSKOS_HOST is not set"; exit 1; fi
+	go run $(JANITOR_DIR) --dry-run=false
 
 .PHONY: clean-temporary
 clean-temporary: ## Remove all temporary files and folders
diff --git a/hack/tools/boskosctl/main.go b/hack/tools/boskosctl/main.go
index e7de7c03e9..ac6f3a43f9 100644
--- a/hack/tools/boskosctl/main.go
+++ b/hack/tools/boskosctl/main.go
@@ -359,8 +359,7 @@ func release(ctx context.Context, client *boskos.Client, resourceName, vSphereUs
 	defer vSphereClients.Logout(ctx)
 
 	// Delete all VMs created up until now.
-	maxCreationDate := time.Now()
-	j := janitor.NewJanitor(vSphereClients, nil, maxCreationDate, "", false)
+	j := janitor.NewJanitor(vSphereClients, false)
 
 	log.Info("Cleaning up vSphere")
 	// Note: We intentionally want to skip clusterModule cleanup. If we run this too often we might hit race conditions
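[Reviewer note, not part of the patch] After this change the release path constructs the janitor from just the vSphere clients plus a dry-run flag, and all scoping is passed per call. A minimal, hedged sketch of that call site follows; cleanupOnRelease is a hypothetical name, and the assumption that the final CleanupVSphere argument is the cluster-module skip toggle referenced in the boskosctl comment above is mine, not confirmed by this diff.

// Illustrative sketch only; not part of this diff.
package main

import (
	"context"

	"github.com/pkg/errors"

	"sigs.k8s.io/cluster-api-provider-vsphere/hack/tools/pkg/janitor"
)

// cleanupOnRelease is a hypothetical helper mirroring the release path above.
func cleanupOnRelease(ctx context.Context, vSphereClients *janitor.VSphereClients, folder, resourcePool string) error {
	// NewJanitor now takes only the clients and the dry-run flag; the IPAM
	// client, IPAM namespace and maxCreationDate parameters are gone.
	j := janitor.NewJanitor(vSphereClients, false)

	// Folders, resource pools and VM folders are scoped per call. The last
	// argument is assumed here to be the cluster-module skip toggle.
	if err := j.CleanupVSphere(ctx, []string{folder}, []string{resourcePool}, []string{folder}, true); err != nil {
		return errors.Wrap(err, "cleaning up vSphere")
	}
	return nil
}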
diff --git a/hack/tools/janitor/main.go b/hack/tools/janitor/main.go
index f4e8d640dc..755c0e7e38 100644
--- a/hack/tools/janitor/main.go
+++ b/hack/tools/janitor/main.go
@@ -20,8 +20,8 @@ package main
 import (
 	"context"
 	"flag"
+	"fmt"
 	"os"
-	"time"
 
 	"github.com/pkg/errors"
 	"github.com/spf13/pflag"
@@ -30,7 +30,6 @@ import (
 	"k8s.io/klog/v2"
 	ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1"
 	ctrl "sigs.k8s.io/controller-runtime"
-	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	"sigs.k8s.io/cluster-api-provider-vsphere/hack/tools/pkg/boskos"
 	"sigs.k8s.io/cluster-api-provider-vsphere/hack/tools/pkg/janitor"
@@ -45,16 +44,9 @@ func init() {
 
 var (
 	dryRun bool
-	ipamNamespace string
-	maxAge time.Duration
-	// Flags to get folders and resource pools from Boskos.
 	boskosHost    string
 	resourceOwner string
 	resourceTypes []string
-	// Flags to directly specify folders and resource pools.
-	vsphereVMFolders     []string
-	vsphereFolders       []string
-	vsphereResourcePools []string
 )
 
 func initFlags(fs *pflag.FlagSet) {
 	fs.StringVar(&boskosHost, "boskos-host", os.Getenv("BOSKOS_HOST"), "Boskos server URL. Boskos is only used to retrieve resources if this flag is set.")
 	fs.StringVar(&resourceOwner, "resource-owner", "vsphere-janitor", "Owner for the resource.")
 	fs.StringArrayVar(&resourceTypes, "resource-type", []string{"vsphere-project-cluster-api-provider", "vsphere-project-cloud-provider", "vsphere-project-image-builder"}, "Types of the resources")
-	fs.StringArrayVar(&vsphereVMFolders, "vm-folder", []string{}, "Path to folders in vCenter to cleanup virtual machines.")
-	fs.StringArrayVar(&vsphereFolders, "folder", []string{}, "Path to a folder in vCenter to recursively cleanup empty subfolders.")
-	fs.StringArrayVar(&vsphereResourcePools, "resource-pool", []string{}, "Path to a resource pool in vCenter to recursively cleanup empty child resource pools.")
-	fs.StringVar(&ipamNamespace, "ipam-namespace", "", "Namespace for IPAddressClaim cleanup.")
-	fs.DurationVar(&maxAge, "max-age", time.Hour*12, "Maximum age of an object before it is getting deleted.")
 	fs.BoolVar(&dryRun, "dry-run", false, "dry-run results in not deleting anything but printing the actions.")
 }
@@ -90,6 +77,17 @@ func main() {
 
 func run(ctx context.Context) error {
 	log := ctrl.LoggerFrom(ctx)
+	log.Info("Configured settings", "dry-run", dryRun)
+
+	if boskosHost == "" {
+		return fmt.Errorf("--boskos-host must be set")
+	}
+	if resourceOwner == "" {
+		return fmt.Errorf("--resource-owner must be set")
+	}
+	if len(resourceTypes) == 0 {
+		return fmt.Errorf("--resource-type must be set")
+	}
 
 	// Create clients for vSphere.
 	vSphereClients, err := janitor.NewVSphereClients(ctx, janitor.NewVSphereClientsInput{
@@ -104,111 +102,76 @@ func run(ctx context.Context) error {
 	}
 	defer vSphereClients.Logout(ctx)
 
-	// Create controller-runtime client for IPAM.
-	restConfig, err := ctrl.GetConfig()
-	if err != nil {
-		return errors.Wrap(err, "unable to get kubeconfig")
-	}
-	ipamClient, err := client.New(restConfig, client.Options{Scheme: ipamScheme})
+	log = log.WithValues("boskosHost", boskosHost, "resourceOwner", resourceOwner)
+	log.Info("Getting resources to cleanup from Boskos")
+	client, err := boskos.NewClient(resourceOwner, boskosHost)
 	if err != nil {
-		return errors.Wrap(err, "creating IPAM client")
+		return err
 	}
 
-	if boskosHost != "" {
-		log = log.WithValues("boskosHost", boskosHost, "resourceOwner", resourceOwner)
-		log.Info("Getting resources to cleanup from Boskos")
-		client, err := boskos.NewClient(resourceOwner, boskosHost)
-		if err != nil {
-			return err
-		}
-
-		var allErrs []error
-		for _, resourceType := range resourceTypes {
-			// For all resource in state dirty that are currently not owned:
-			// * acquire the resource (and set it to state "cleaning")
-			// * try to clean up vSphere
-			// * if cleanup succeeds, release the resource as free
-			// * if cleanup fails, resource will stay in cleaning and become stale (reaper will move it to dirty)
-			for {
-				log.Info("Acquiring resource")
-				res, err := client.Acquire(resourceType, boskos.Dirty, boskos.Cleaning)
-				if err != nil {
-					// If we get an error on acquire we're done looping through all dirty resources
-					if errors.Is(err, boskos.ErrNotFound) {
-						// Note: ErrNotFound means there are no more dirty resources that are not owned.
-						log.Info("No more resources to cleanup")
-						break
-					}
-					allErrs = append(allErrs, errors.Wrapf(err, "failed to acquire resource"))
-					break
-				}
-				log := log.WithValues("resourceName", res.Name)
+	var allErrs []error
+	for _, resourceType := range resourceTypes {
+		log = log.WithValues("resourceType", resourceType)
+		// For all resources in state dirty that are currently not owned:
+		// * acquire the resource (and set it to state "cleaning")
+		// * try to clean up vSphere
+		// * if cleanup succeeds, release the resource as free
+		// * if cleanup fails, resource will stay in cleaning and become stale (reaper will move it to dirty)
+		for {
+			log.Info("Acquiring resource")
+			res, err := client.Acquire(resourceType, boskos.Dirty, boskos.Cleaning)
+			if err != nil {
+				// If we get an error on acquire we're done looping through all dirty resources
+				if errors.Is(err, boskos.ErrNotFound) {
+					// Note: ErrNotFound means there are no more dirty resources that are not owned.
+					log.Info("No more resources to cleanup")
+					break
+				}
+				allErrs = append(allErrs, errors.Wrapf(err, "failed to acquire resource"))
+				break
+			}
+			log := log.WithValues("resourceName", res.Name)
 
-				if res.UserData == nil {
-					allErrs = append(allErrs, errors.Errorf("failed to get user data, resource %q is missing user data", res.Name))
-					continue
-				}
+			if res.UserData == nil {
+				allErrs = append(allErrs, errors.Errorf("failed to get user data, resource %q is missing user data", res.Name))
+				continue
+			}
 
-				folder, hasFolder := res.UserData.Load("folder")
-				if !hasFolder {
-					allErrs = append(allErrs, errors.Errorf("failed to get user data, resource %q is missing \"folder\" key", res.Name))
-					continue
-				}
-				resourcePool, hasResourcePool := res.UserData.Load("resourcePool")
-				if !hasResourcePool {
-					allErrs = append(allErrs, errors.Errorf("failed to get user data, resource %q is missing \"resourcePool\" key", res.Name))
-					continue
-				}
+			folder, hasFolder := res.UserData.Load("folder")
+			if !hasFolder {
+				allErrs = append(allErrs, errors.Errorf("failed to get user data, resource %q is missing \"folder\" key", res.Name))
+				continue
+			}
+			resourcePool, hasResourcePool := res.UserData.Load("resourcePool")
+			if !hasResourcePool {
+				allErrs = append(allErrs, errors.Errorf("failed to get user data, resource %q is missing \"resourcePool\" key", res.Name))
+				continue
+			}
 
-				// Delete all VMs created up until now.
-				maxCreationDate := time.Now()
-				j := janitor.NewJanitor(vSphereClients, nil, maxCreationDate, "", false)
+			j := janitor.NewJanitor(vSphereClients, false)
 
-				log.Info("Cleaning up vSphere")
-				if err := j.CleanupVSphere(ctx, []string{folder.(string)}, []string{resourcePool.(string)}, []string{folder.(string)}, false); err != nil {
-					log.Info("Cleaning up vSphere failed")
+			log.Info("Cleaning up vSphere")
+			if err := j.CleanupVSphere(ctx, []string{folder.(string)}, []string{resourcePool.(string)}, []string{folder.(string)}, false); err != nil {
+				log.Info("Cleaning up vSphere failed")
 
-					// Intentionally keep this resource in cleaning state. The reaper will move it from cleaning to dirty
-					// and we'll retry the cleanup.
-					// If we move it to dirty here, the for loop will pick it up again, and we get stuck in an infinite loop.
-					allErrs = append(allErrs, errors.Wrapf(err, "cleaning up vSphere failed, resource %q will now become stale", res.Name))
-					continue
-				}
-				log.Info("Cleaning up vSphere succeeded")
+				// Intentionally keep this resource in cleaning state. The reaper will move it from cleaning to dirty
+				// and we'll retry the cleanup.
+				// If we move it to dirty here, the for loop will pick it up again, and we get stuck in an infinite loop.
+				allErrs = append(allErrs, errors.Wrapf(err, "cleaning up vSphere failed, resource %q will now become stale", res.Name))
+				continue
+			}
+			log.Info("Cleaning up vSphere succeeded")
 
-				// Try to release resource as free.
-				log.Info("Releasing resource as free")
-				if releaseErr := client.Release(res.Name, boskos.Free); releaseErr != nil {
-					allErrs = append(allErrs, errors.Wrapf(releaseErr, "cleaning up vSphere succeeded and releasing resource as free failed, resource %q will now become stale", res.Name))
-				}
-				log.Info("Releasing resource as free succeeded")
+			// Try to release resource as free.
+			log.Info("Releasing resource as free")
+			if releaseErr := client.Release(res.Name, boskos.Free); releaseErr != nil {
+				allErrs = append(allErrs, errors.Wrapf(releaseErr, "cleaning up vSphere succeeded and releasing resource as free failed, resource %q will now become stale", res.Name))
 			}
+			log.Info("Releasing resource as free succeeded")
 		}
-		if len(allErrs) > 0 {
-			return errors.Wrap(kerrors.NewAggregate(allErrs), "cleaning up Boskos resources")
-		}
-	}
-
-	// Note: The following will be deleted once we migrated all repos to Boskos.
-	maxCreationDate := time.Now().Add(-maxAge)
-	janitor := janitor.NewJanitor(vSphereClients, ipamClient, maxCreationDate, ipamNamespace, dryRun)
-
-	log.Info("Configured settings", "dry-run", dryRun)
-	log.Info("Configured settings", "folders", vsphereFolders)
-	log.Info("Configured settings", "vm-folders", vsphereVMFolders)
-	log.Info("Configured settings", "resource-pools", vsphereResourcePools)
-	log.Info("Configured settings", "ipam-namespace", ipamNamespace)
-	log.Info("Configured settings", "max-age", maxAge)
-	log.Info("Configured settings", "janitor.maxCreationDate", maxCreationDate)
-
-	// First cleanup old vms and other vSphere resources to free up IPAddressClaims or cluster modules which are still in-use.
-	if err := janitor.CleanupVSphere(ctx, vsphereFolders, vsphereResourcePools, vsphereVMFolders, false); err != nil {
-		return errors.Wrap(err, "cleaning up vSphere")
 	}
-
-	// Second cleanup IPAddressClaims.
-	if err := janitor.DeleteIPAddressClaims(ctx); err != nil {
-		return errors.Wrap(err, "cleaning up IPAddressClaims")
+	if len(allErrs) > 0 {
+		return errors.Wrap(kerrors.NewAggregate(allErrs), "cleaning up Boskos resources")
 	}
 
 	return nil
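[Reviewer note, not part of the patch] The loop above is now the janitor's only cleanup path: resources move dirty -> cleaning -> free, and a failed cleanup is deliberately left in cleaning so the Boskos reaper marks it dirty again later. As a reading aid, here is a condensed, hedged sketch of that state machine; cleanResourceType is a hypothetical name and the error handling is simplified relative to the code above.

// Illustrative sketch only; not part of this diff.
package main

import (
	"context"

	"github.com/pkg/errors"

	"sigs.k8s.io/cluster-api-provider-vsphere/hack/tools/pkg/boskos"
	"sigs.k8s.io/cluster-api-provider-vsphere/hack/tools/pkg/janitor"
)

// cleanResourceType is a hypothetical helper condensing the loop above.
func cleanResourceType(ctx context.Context, client *boskos.Client, j *janitor.Janitor, resourceType string) []error {
	var errs []error
	for {
		// Acquire flips one unowned dirty resource to "cleaning".
		res, err := client.Acquire(resourceType, boskos.Dirty, boskos.Cleaning)
		if err != nil {
			if errors.Is(err, boskos.ErrNotFound) {
				// No unowned dirty resources left for this type.
				return errs
			}
			return append(errs, errors.Wrap(err, "failed to acquire resource"))
		}

		if res.UserData == nil {
			errs = append(errs, errors.Errorf("resource %q is missing user data", res.Name))
			continue
		}
		folder, hasFolder := res.UserData.Load("folder")
		resourcePool, hasResourcePool := res.UserData.Load("resourcePool")
		if !hasFolder || !hasResourcePool {
			errs = append(errs, errors.Errorf("resource %q is missing folder/resourcePool user data", res.Name))
			continue
		}

		if err := j.CleanupVSphere(ctx, []string{folder.(string)}, []string{resourcePool.(string)}, []string{folder.(string)}, false); err != nil {
			// Keep the resource in "cleaning": the reaper will move it back to
			// dirty, and a later run retries. Releasing it as dirty here would
			// make this loop re-acquire it immediately.
			errs = append(errs, errors.Wrapf(err, "cleaning up resource %q", res.Name))
			continue
		}

		if err := client.Release(res.Name, boskos.Free); err != nil {
			errs = append(errs, errors.Wrapf(err, "releasing resource %q as free", res.Name))
		}
	}
}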
- log.Info("No more resources to cleanup") - break - } - allErrs = append(allErrs, errors.Wrapf(err, "failed to acquire resource")) + var allErrs []error + for _, resourceType := range resourceTypes { + log = log.WithValues("resourceType", resourceType) + // For all resource in state dirty that are currently not owned: + // * acquire the resource (and set it to state "cleaning") + // * try to clean up vSphere + // * if cleanup succeeds, release the resource as free + // * if cleanup fails, resource will stay in cleaning and become stale (reaper will move it to dirty) + for { + log.Info("Acquiring resource") + res, err := client.Acquire(resourceType, boskos.Dirty, boskos.Cleaning) + if err != nil { + // If we get an error on acquire we're done looping through all dirty resources + if errors.Is(err, boskos.ErrNotFound) { + // Note: ErrNotFound means there are no more dirty resources that are not owned. + log.Info("No more resources to cleanup") break } - log := log.WithValues("resourceName", res.Name) + allErrs = append(allErrs, errors.Wrapf(err, "failed to acquire resource")) + break + } + log := log.WithValues("resourceName", res.Name) - if res.UserData == nil { - allErrs = append(allErrs, errors.Errorf("failed to get user data, resource %q is missing user data", res.Name)) - continue - } + if res.UserData == nil { + allErrs = append(allErrs, errors.Errorf("failed to get user data, resource %q is missing user data", res.Name)) + continue + } - folder, hasFolder := res.UserData.Load("folder") - if !hasFolder { - allErrs = append(allErrs, errors.Errorf("failed to get user data, resource %q is missing \"folder\" key", res.Name)) - continue - } - resourcePool, hasResourcePool := res.UserData.Load("resourcePool") - if !hasResourcePool { - allErrs = append(allErrs, errors.Errorf("failed to get user data, resource %q is missing \"resourcePool\" key", res.Name)) - continue - } + folder, hasFolder := res.UserData.Load("folder") + if !hasFolder { + allErrs = append(allErrs, errors.Errorf("failed to get user data, resource %q is missing \"folder\" key", res.Name)) + continue + } + resourcePool, hasResourcePool := res.UserData.Load("resourcePool") + if !hasResourcePool { + allErrs = append(allErrs, errors.Errorf("failed to get user data, resource %q is missing \"resourcePool\" key", res.Name)) + continue + } - // Delete all VMs created up until now. - maxCreationDate := time.Now() - j := janitor.NewJanitor(vSphereClients, nil, maxCreationDate, "", false) + j := janitor.NewJanitor(vSphereClients, false) - log.Info("Cleaning up vSphere") - if err := j.CleanupVSphere(ctx, []string{folder.(string)}, []string{resourcePool.(string)}, []string{folder.(string)}, false); err != nil { - log.Info("Cleaning up vSphere failed") + log.Info("Cleaning up vSphere") + if err := j.CleanupVSphere(ctx, []string{folder.(string)}, []string{resourcePool.(string)}, []string{folder.(string)}, false); err != nil { + log.Info("Cleaning up vSphere failed") - // Intentionally keep this resource in cleaning state. The reaper will move it from cleaning to dirty - // and we'll retry the cleanup. - // If we move it to dirty here, the for loop will pick it up again, and we get stuck in an infinite loop. - allErrs = append(allErrs, errors.Wrapf(err, "cleaning up vSphere failed, resource %q will now become stale", res.Name)) - continue - } - log.Info("Cleaning up vSphere succeeded") + // Intentionally keep this resource in cleaning state. The reaper will move it from cleaning to dirty + // and we'll retry the cleanup. 
diff --git a/hack/tools/pkg/janitor/janitor_test.go b/hack/tools/pkg/janitor/janitor_test.go
index a604127142..a2db319317 100644
--- a/hack/tools/pkg/janitor/janitor_test.go
+++ b/hack/tools/pkg/janitor/janitor_test.go
@@ -114,35 +114,19 @@ func Test_janitor_deleteVSphereVMs(t *testing.T) {
 	// Initialize and start vcsim
 	clients, sim := setup(ctx, t)
 
-	deleteAll := time.Now().Add(time.Hour * 1)
-	deleteNone := time.Now()
-
 	tests := []struct {
-		name            string
-		objects         []*vcsimObject
-		maxCreationDate time.Time
-		wantErr         bool
-		want            map[string]bool
+		name    string
+		objects []*vcsimObject
+		wantErr bool
+		want    map[string]bool
 	}{
 		{
 			name: "delete all VMs",
 			objects: []*vcsimObject{
 				vcsimVirtualMachine("foo"),
 			},
-			maxCreationDate: deleteAll,
-			wantErr:         false,
-			want:            nil,
-		},
-		{
-			name: "delete no VMs",
-			objects: []*vcsimObject{
-				vcsimVirtualMachine("foo"),
-			},
-			maxCreationDate: deleteNone,
-			wantErr:         false,
-			want: map[string]bool{
-				"VirtualMachine/foo": true,
-			},
+			wantErr: false,
+			want:    nil,
 		},
 		{
 			name: "recursive vm deletion",
@@ -157,8 +141,7 @@ func Test_janitor_deleteVSphereVMs(t *testing.T) {
 				vcsimVirtualMachine("a/bar"),
 				vcsimVirtualMachine("a/b/c/foobar"),
 			},
-			maxCreationDate: deleteAll,
-			wantErr:         false,
+			wantErr: false,
 			want: map[string]bool{
 				"ResourcePool/a":   true,
 				"ResourcePool/a/b": true,
@@ -176,9 +159,8 @@ func Test_janitor_deleteVSphereVMs(t *testing.T) {
 			relativePath, _ := setupTestCase(g, sim, tt.objects)
 
 			s := &Janitor{
-				dryRun:          false,
-				maxCreationDate: tt.maxCreationDate,
-				vSphereClients:  clients,
+				dryRun:         false,
+				vSphereClients: clients,
 			}
 
 			// use folder created for this test case as inventoryPath
@@ -314,9 +296,8 @@ func Test_janitor_deleteObjectChildren(t *testing.T) {
 			inventoryPath := path.Join(tt.basePath, relativePath)
 
 			s := &Janitor{
-				dryRun:          false,
-				maxCreationDate: time.Now().Add(time.Hour * 1),
-				vSphereClients:  clients,
+				dryRun:         false,
+				vSphereClients: clients,
 			}
 
 			// Run first iteration which should only tag the resource pools with a timestamp.
@@ -348,8 +329,6 @@ func Test_janitor_CleanupVSphere(t *testing.T) {
 	// Initialize and start vcsim
 	clients, sim := setup(ctx, t)
 
-	deleteAll := time.Now().Add(time.Hour * 1)
-
 	tests := []struct {
 		name               string
 		dryRun             bool
@@ -361,7 +340,6 @@ func Test_janitor_CleanupVSphere(t *testing.T) {
 		{
 			name:               "no-op",
 			dryRun:             false,
-			maxCreationDate:    deleteAll,
 			objects:            nil,
 			wantAfterFirstRun:  map[string]bool{},
 			wantAfterSecondRun: map[string]bool{},
 		},
 		{
 			name:               "dryRun: no-op",
 			dryRun:             true,
-			maxCreationDate:    deleteAll,
 			objects:            nil,
 			wantAfterFirstRun:  map[string]bool{},
 			wantAfterSecondRun: map[string]bool{},
 		},
 		{
-			name:            "delete everything",
-			dryRun:          false,
-			maxCreationDate: deleteAll,
+			name:   "delete everything",
+			dryRun: false,
 			objects: []*vcsimObject{
 				vcsimFolder("a"),
 				vcsimResourcePool("a"),
 			wantAfterSecondRun: map[string]bool{},
 		},
 		{
-			name:            "dryRun: would delete everything",
-			dryRun:          true,
-			maxCreationDate: deleteAll,
+			name:   "dryRun: would delete everything",
+			dryRun: true,
 			objects: []*vcsimObject{
 				vcsimFolder("a"),
 				vcsimResourcePool("a"),
@@ -427,9 +402,8 @@ func Test_janitor_CleanupVSphere(t *testing.T) {
 			relativePath, _ := setupTestCase(g, sim, tt.objects)
 
 			s := &Janitor{
-				dryRun:          tt.dryRun,
-				maxCreationDate: tt.maxCreationDate,
-				vSphereClients:  clients,
+				dryRun:         tt.dryRun,
+				vSphereClients: clients,
 			}
 
 			folder := vcsimFolder("").Path(relativePath)
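[Reviewer note, not part of the patch] For reference, a minimal sketch of the simplified table shape these tests converge on, now that maxCreationDate is gone: cases only carry the objects to create in vcsim and the expected survivors. It assumes the package-internal vcsim helpers (vcsimObject, vcsimVirtualMachine) visible in this diff and the janitor package name.

// Illustrative sketch only; not part of this diff.
package janitor

import "testing"

func Test_sketch_tableShape(t *testing.T) {
	t.Skip("shape illustration only")

	tests := []struct {
		name    string
		objects []*vcsimObject
		wantErr bool
		want    map[string]bool
	}{
		{
			name:    "delete all VMs",
			objects: []*vcsimObject{vcsimVirtualMachine("foo")},
			wantErr: false,
			// nil means: nothing is expected to survive the cleanup.
			want: nil,
		},
	}
	_ = tests
}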
diff --git a/hack/tools/pkg/janitor/vsphere.go b/hack/tools/pkg/janitor/vsphere.go
index 3e64e720d7..46c51a5c09 100644
--- a/hack/tools/pkg/janitor/vsphere.go
+++ b/hack/tools/pkg/janitor/vsphere.go
@@ -18,9 +18,7 @@ package janitor
 
 import (
 	"context"
-	"fmt"
 	"net/url"
-	"time"
 
 	"github.com/pkg/errors"
 	"github.com/vmware/govmomi"
@@ -34,7 +32,6 @@ import (
 	"github.com/vmware/govmomi/vim25"
 	"github.com/vmware/govmomi/vim25/mo"
 	"github.com/vmware/govmomi/vim25/soap"
-	"github.com/vmware/govmomi/vim25/types"
 	ctrl "sigs.k8s.io/controller-runtime"
 )
 
@@ -124,8 +121,6 @@ func NewVSphereClients(ctx context.Context, input NewVSphereClientsInput) (*VSph
 	}, nil
 }
 
-const vSphereDeletionMarkerName = "capv-janitor-deletion-marker"
-
 func waitForTasksFinished(ctx context.Context, tasks []*object.Task, ignoreErrors bool) error {
 	for _, t := range tasks {
 		if err := t.Wait(ctx); !ignoreErrors && err != nil {
@@ -135,31 +130,6 @@ func waitForTasksFinished(ctx context.Context, tasks []*object.Task, ignoreError
 	return nil
 }
 
-func getDeletionMarkerTimestamp(key int32, values []types.BaseCustomFieldValue) (*time.Time, error) {
-	// Find the value for the key
-	var b *types.BaseCustomFieldValue
-	for i := range values {
-		if values[i].GetCustomFieldValue().Key != key {
-			continue
-		}
-		b = &values[i]
-		break
-	}
-
-	// Key does not exist
-	if b == nil {
-		return nil, nil
-	}
-
-	value, ok := (*b).(*types.CustomFieldStringValue)
-	if !ok {
-		return nil, fmt.Errorf("cannot typecast %t to *types.CustomFieldStringValue", *b)
-	}
-
-	t, err := time.Parse(time.RFC3339, value.Value)
-	return &t, err
-}
-
 type managedElement struct {
 	entity  mo.ManagedEntity
 	element *list.Element