From 144a7859d97fcd7e64570081bcad7579e2715720 Mon Sep 17 00:00:00 2001
From: Swanand Shende
Date: Mon, 13 Jan 2025 14:30:29 +0530
Subject: [PATCH] Fix namespace metadata not being restored with existing-resource-policy

This change ensures that when restoring namespaces with
existing-resource-policy set to 'update', the namespace metadata
(labels and annotations) from the backup bundle is properly restored.

Fixes #7519

Signed-off-by: Swanand Shende
---
 .../7519-namespace-metadata-restore.yaml |  7 ++
 pkg/restore/restore.go                   | 86 ++++++++++++++++++-
 2 files changed, 92 insertions(+), 1 deletion(-)
 create mode 100644 changelogs/unreleased/7519-namespace-metadata-restore.yaml

diff --git a/changelogs/unreleased/7519-namespace-metadata-restore.yaml b/changelogs/unreleased/7519-namespace-metadata-restore.yaml
new file mode 100644
index 0000000000..674930b322
--- /dev/null
+++ b/changelogs/unreleased/7519-namespace-metadata-restore.yaml
@@ -0,0 +1,7 @@
+kind: bug
+area: restore
+title: Fix namespace metadata not being restored with existing-resource-policy
+issue: 7519
+note: |
+  Fixes an issue where namespace labels and annotations from backup bundles
+  were not being restored even when --existing-resource-policy was set to "update"
diff --git a/pkg/restore/restore.go b/pkg/restore/restore.go
index 619db5b87e..9798c40fba 100644
--- a/pkg/restore/restore.go
+++ b/pkg/restore/restore.go
@@ -744,6 +744,88 @@ func (ctx *restoreContext) processSelectedResource(
 			}
 
 			// For namespaces resources we don't need to following steps
 			if groupResource == kuberesource.Namespaces {
+				if existingNamespaces.Has(targetNS) {
+					// Check if the existing resource policy is set to 'update'
+					if len(ctx.restore.Spec.ExistingResourcePolicy) == 0 || ctx.restore.Spec.ExistingResourcePolicy != velerov1api.PolicyTypeUpdate {
+						ctx.log.Infof("Skipping update for existing namespace %s because existing resource policy is not 'update'", targetNS)
+						continue
+					}
+
+					// Fetch the current namespace from the cluster
+					existingNS, err := ctx.namespaceClient.Get(go_context.TODO(), targetNS, metav1.GetOptions{})
+					if err != nil {
+						errs.AddVeleroError(errors.Wrap(err, "fetching existing namespace"))
+						continue
+					}
+
+					// Retrieve the backup namespace definition
+					backupNS := getNamespace(
+						ctx.log.WithField("namespace", namespace),
+						archive.GetItemFilePath(ctx.restoreDir, "namespaces", "", namespace),
+						targetNS,
+					)
+
+					// Convert both namespaces to unstructured for patching
+					existingNSUnstructured, err := runtime.DefaultUnstructuredConverter.ToUnstructured(existingNS)
+					if err != nil {
+						errs.AddVeleroError(errors.Wrap(err, "converting existing namespace to unstructured"))
+						continue
+					}
+					backupNSUnstructured, err := runtime.DefaultUnstructuredConverter.ToUnstructured(backupNS)
+					if err != nil {
+						errs.AddVeleroError(errors.Wrap(err, "converting backup namespace to unstructured"))
+						continue
+					}
+
+					// Construct the GroupResource for namespaces
+					namespaceGR := schema.GroupResource{Group: "", Resource: "namespaces"}
+
+					// Use getResourceClient to obtain a dynamic client for the namespace resource
+					resourceClient, err := ctx.getResourceClient(namespaceGR, &unstructured.Unstructured{Object: backupNSUnstructured}, "")
+					if err != nil {
+						errs.AddVeleroError(errors.Wrap(err, "getting dynamic client for Namespace resource"))
+						continue
+					}
+
+					// Process the update policy using the existing function
+					warningsFromUpdateRP, errsFromUpdateRP := ctx.processUpdateResourcePolicy(
+						&unstructured.Unstructured{Object: existingNSUnstructured},
+						&unstructured.Unstructured{Object: existingNSUnstructured}, // Pass existingNS with restore labels for the second parameter
+						&unstructured.Unstructured{Object: backupNSUnstructured},
+						targetNS,
+						resourceClient,
+					)
+
+					// Fall back to manual label/annotation update if the patch fails
+					if !errsFromUpdateRP.IsEmpty() {
+						ctx.log.Warnf("Patch failed for namespace %s, falling back to manual label/annotation update", targetNS)
+
+						// Ensure existingNS.Labels and Annotations are not nil
+						if existingNS.Labels == nil {
+							existingNS.Labels = make(map[string]string)
+						}
+						if existingNS.Annotations == nil {
+							existingNS.Annotations = make(map[string]string)
+						}
+
+						// Merge labels and annotations
+						for k, v := range backupNS.Labels {
+							existingNS.Labels[k] = v
+						}
+						for k, v := range backupNS.Annotations {
+							existingNS.Annotations[k] = v
+						}
+
+						// Apply the updated namespace
+						_, err = ctx.namespaceClient.Update(go_context.TODO(), existingNS, metav1.UpdateOptions{})
+						if err != nil {
+							errs.AddVeleroError(errors.Wrap(err, "updating namespace manually"))
+						}
+					}
+
+					warnings.Merge(&warningsFromUpdateRP)
+					errs.Merge(&errsFromUpdateRP)
+				}
 				continue
 			}
@@ -2243,7 +2325,9 @@ func (ctx *restoreContext) getOrderedResourceCollection(
 				continue
 			}
 
-			if namespace == "" && !boolptr.IsSetToTrue(ctx.restore.Spec.IncludeClusterResources) && !ctx.namespaceIncludesExcludes.IncludeEverything() {
+			if groupResource.Resource == "namespaces" {
+				ctx.log.Infof("Including resource namespaces despite being cluster-scoped")
+			} else if namespace == "" && !boolptr.IsSetToTrue(ctx.restore.Spec.IncludeClusterResources) && !ctx.namespaceIncludesExcludes.IncludeEverything() {
 				ctx.log.Infof("Skipping resource %s because it's cluster-scoped and only specific namespaces are included in the restore", resource)
 				continue
 			}
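
Reviewer note (not part of the patch): the manual fallback above boils down to a simple
merge rule, where metadata from the backup overwrites existing keys on conflict and keys
that exist only on the live namespace are preserved. A minimal standalone sketch of that
rule follows; the helper name mergeMetadata and the sample values are hypothetical and
used only for illustration.

package main

import "fmt"

// mergeMetadata copies every key from the backup map into the existing map,
// overwriting values on conflict and leaving cluster-only keys untouched.
// This mirrors the fallback loops over backupNS.Labels and backupNS.Annotations.
func mergeMetadata(existing, backup map[string]string) map[string]string {
	if existing == nil {
		existing = make(map[string]string)
	}
	for k, v := range backup {
		existing[k] = v
	}
	return existing
}

func main() {
	// Hypothetical label sets for illustration only.
	existing := map[string]string{"team": "platform", "env": "prod"}
	backup := map[string]string{"env": "staging", "owner": "velero"}
	fmt.Println(mergeMetadata(existing, backup))
	// Output: map[env:staging owner:velero team:platform]
}

One way to exercise the change end to end is to restore a backup into a cluster where the
namespace already exists, passing --existing-resource-policy=update (the flag referenced
in the changelog entry), and then compare the namespace's labels and annotations against
the backed-up definition.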