diff --git a/pkg/action/uninstall.go b/pkg/action/uninstall.go
index efbc72fef..bba66dc07 100644
--- a/pkg/action/uninstall.go
+++ b/pkg/action/uninstall.go
@@ -81,6 +81,60 @@ func (u *Uninstall) Run(name string) (*releasei.UninstallReleaseResponse, error)
 		if err != nil {
 			return nil, err
 		}
+
+		// Verify ownership in dry-run mode to show what would actually be deleted
+		manifests := releaseutil.SplitManifests(r.Manifest)
+		_, files, err := releaseutil.SortManifests(manifests, nil, releaseutil.UninstallOrder)
+		if err == nil {
+			filesToKeep, filesToDelete := filterManifestsToKeep(files)
+
+			var builder strings.Builder
+			for _, file := range filesToDelete {
+				builder.WriteString("\n---\n" + file.Content)
+			}
+
+			resources, err := u.cfg.KubeClient.Build(strings.NewReader(builder.String()), false)
+			if err == nil && len(resources) > 0 {
+				ownedResources, unownedResources, err := verifyOwnershipBeforeDelete(resources, r.Name, r.Namespace)
+				if err == nil {
+					if len(unownedResources) > 0 {
+						u.cfg.Logger().Warn("dry-run: resources would be skipped because they are not owned by this release",
+							"release", r.Name,
+							"count", len(unownedResources))
+						for _, info := range unownedResources {
+							u.cfg.Logger().Warn("dry-run: would skip resource",
+								"kind", info.Mapping.GroupVersionKind.Kind,
+								"name", info.Name,
+								"namespace", info.Namespace)
+						}
+					}
+
+					if len(ownedResources) > 0 {
+						u.cfg.Logger().Debug("dry-run: resources would be deleted",
+							"release", r.Name,
+							"count", len(ownedResources))
+						for _, info := range ownedResources {
+							u.cfg.Logger().Debug("dry-run: would delete resource",
+								"kind", info.Mapping.GroupVersionKind.Kind,
+								"name", info.Name,
+								"namespace", info.Namespace)
+						}
+					}
+				}
+			}
+
+			// Include kept resources in dry-run info
+			if len(filesToKeep) > 0 {
+				var kept strings.Builder
+				kept.WriteString("These resources were kept due to the resource policy:\n")
+				for _, f := range filesToKeep {
+					fmt.Fprintf(&kept, "[%s] %s\n", f.Head.Kind, f.Head.Metadata.Name)
+				}
+				res := &releasei.UninstallReleaseResponse{Release: r, Info: kept.String()}
+				return res, nil
+			}
+		}
+
 		return &releasei.UninstallReleaseResponse{Release: r}, nil
 	}
 
@@ -275,13 +329,46 @@ func (u *Uninstall) deleteRelease(rel *release.Release) (kube.ResourceList, stri
 	if err != nil {
 		return nil, "", []error{fmt.Errorf("unable to build kubernetes objects for delete: %w", err)}
 	}
+
+	// Verify ownership before deleting resources
+	var ownedResources, unownedResources kube.ResourceList
 	if len(resources) > 0 {
-		_, errs = u.cfg.KubeClient.Delete(resources, parseCascadingFlag(u.DeletionPropagation))
+		ownedResources, unownedResources, err = verifyOwnershipBeforeDelete(resources, rel.Name, rel.Namespace)
+		if err != nil {
+			return nil, "", []error{fmt.Errorf("unable to verify resource ownership: %w", err)}
+		}
+
+		// Log warnings for unowned resources
+		if len(unownedResources) > 0 {
+			for _, info := range unownedResources {
+				u.cfg.Logger().Warn("skipping delete of resource not owned by this release",
+					"kind", info.Mapping.GroupVersionKind.Kind,
+					"name", info.Name,
+					"namespace", info.Namespace,
+					"release", rel.Name)
+			}
+			kept.WriteString(fmt.Sprintf("\n%d resource(s) were not deleted because they are not owned by this release:\n", len(unownedResources)))
+			for _, info := range unownedResources {
+				fmt.Fprintf(&kept, "[%s] %s\n", info.Mapping.GroupVersionKind.Kind, info.Name)
+			}
+		}
+
+		// Delete only owned resources
+		if len(ownedResources) > 0 {
+			for _, info := range ownedResources {
+				u.cfg.Logger().Debug("deleting resource owned by this release",
+					"kind", info.Mapping.GroupVersionKind.Kind,
+					"name", info.Name,
+					"namespace", info.Namespace,
+					"release", rel.Name)
+			}
+			_, errs = u.cfg.KubeClient.Delete(ownedResources, parseCascadingFlag(u.DeletionPropagation, u.cfg.Logger()))
+		}
 	}
-	return resources, kept.String(), errs
+	return ownedResources, kept.String(), errs
 }
 
-func parseCascadingFlag(cascadingFlag string) v1.DeletionPropagation {
+func parseCascadingFlag(cascadingFlag string, logger *slog.Logger) v1.DeletionPropagation {
 	switch cascadingFlag {
 	case "orphan":
 		return v1.DeletePropagationOrphan
@@ -290,7 +377,7 @@ func parseCascadingFlag(cascadingFlag string) v1.DeletionPropagation {
 	case "background":
 		return v1.DeletePropagationBackground
 	default:
-		slog.Debug("uninstall: given cascade value, defaulting to delete propagation background", "value", cascadingFlag)
+		logger.Debug("uninstall: given cascade value, defaulting to delete propagation background", "value", cascadingFlag)
 		return v1.DeletePropagationBackground
 	}
 }
diff --git a/pkg/action/uninstall_test.go b/pkg/action/uninstall_test.go
index fba1e391f..bed9c04ac 100644
--- a/pkg/action/uninstall_test.go
+++ b/pkg/action/uninstall_test.go
@@ -17,9 +17,11 @@ limitations under the License.
 package action
 
 import (
+	"bytes"
 	"errors"
 	"fmt"
 	"io"
+	"log/slog"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
@@ -147,9 +149,16 @@ func TestUninstallRelease_Cascade(t *testing.T) {
   }
 }`
 	unAction.cfg.Releases.Create(rel)
+
+	// Create dummy resources with Mapping but no Client - this skips ownership verification
+	// (nil Client is treated as owned) and goes directly to delete
+	dummyResources := kube.ResourceList{
+		newDeploymentResource("secret", ""),
+	}
+
 	failer := unAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
 	failer.DeleteError = fmt.Errorf("Uninstall with cascade failed")
-	failer.BuildDummy = true
+	failer.DummyResources = dummyResources
 	unAction.cfg.KubeClient = failer
 	_, err := unAction.Run(rel.Name)
 	require.Error(t, err)
@@ -169,3 +178,178 @@ func TestUninstallRun_UnreachableKubeClient(t *testing.T) {
 	assert.Nil(t, result)
 	assert.ErrorContains(t, err, "connection refused")
 }
+
+func TestUninstallRelease_OwnershipVerification(t *testing.T) {
+	is := assert.New(t)
+
+	// Create a buffer to capture log output
+	logBuffer := &bytes.Buffer{}
+	handler := slog.NewTextHandler(logBuffer, &slog.HandlerOptions{Level: slog.LevelDebug})
+
+	config := actionConfigFixture(t)
+	config.SetLogger(handler)
+
+	unAction := NewUninstall(config)
+	unAction.DisableHooks = true
+	unAction.DryRun = false
+	unAction.KeepHistory = true
+
+	rel := releaseStub()
+	rel.Name = "ownership-test"
+	rel.Namespace = "default"
+	rel.Manifest = `apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: test-configmap
+  labels:
+    app.kubernetes.io/managed-by: Helm
+  annotations:
+    meta.helm.sh/release-name: ownership-test
+    meta.helm.sh/release-namespace: default
+data:
+  key: value`
+	config.Releases.Create(rel)
+
+	// Create dummy resources with proper ownership metadata
+	labels := map[string]string{
+		"app.kubernetes.io/managed-by": "Helm",
+	}
+	annotations := map[string]string{
+		"meta.helm.sh/release-name":      "ownership-test",
+		"meta.helm.sh/release-namespace": "default",
+	}
+	dummyResources := kube.ResourceList{
+		newDeploymentWithOwner("owned-deploy", "default", labels, annotations),
+	}
+	failer := config.KubeClient.(*kubefake.FailingKubeClient)
+	failer.DummyResources = dummyResources
+
+	resi, err := unAction.Run(rel.Name)
+	is.NoError(err)
+	is.NotNil(resi)
+	res, err := releaserToV1Release(resi.Release)
+	is.NoError(err)
+	is.Equal(common.StatusUninstalled, res.Info.Status)
+
+	// Verify log contains debug message about deleting owned resource
+	logOutput := logBuffer.String()
+	is.Contains(logOutput, "deleting resource owned by this release")
+	is.Contains(logOutput, "owned-deploy")
+	is.Contains(logOutput, "Deployment")
+}
+
+func TestUninstallRelease_OwnershipVerification_WithKeepPolicy(t *testing.T) {
+	is := assert.New(t)
+
+	// Create a buffer to capture log output
+	logBuffer := &bytes.Buffer{}
+	handler := slog.NewTextHandler(logBuffer, &slog.HandlerOptions{Level: slog.LevelWarn})
+
+	config := actionConfigFixture(t)
+	config.SetLogger(handler)
+
+	unAction := NewUninstall(config)
+	unAction.DisableHooks = true
+	unAction.DryRun = false
+	unAction.KeepHistory = true
+
+	rel := releaseStub()
+	rel.Name = "keep-and-ownership"
+	rel.Namespace = "default"
+	rel.Manifest = `apiVersion: v1
+kind: Secret
+metadata:
+  name: kept-secret
+  annotations:
+    helm.sh/resource-policy: keep
+    meta.helm.sh/release-name: keep-and-ownership
+    meta.helm.sh/release-namespace: default
+  labels:
+    app.kubernetes.io/managed-by: Helm
+type: Opaque
+data:
+  password: cGFzc3dvcmQ=
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: deleted-configmap
+  labels:
+    app.kubernetes.io/managed-by: Helm
+  annotations:
+    meta.helm.sh/release-name: keep-and-ownership
+    meta.helm.sh/release-namespace: default
+data:
+  key: value`
+	config.Releases.Create(rel)
+
+	// Create dummy resources - one unowned to test logging
+	dummyResources := kube.ResourceList{
+		newDeploymentWithOwner("unowned-deploy", "default", nil, nil),
+	}
+	failer := config.KubeClient.(*kubefake.FailingKubeClient)
+	failer.DummyResources = dummyResources
+
+	res, err := unAction.Run(rel.Name)
+	is.NoError(err)
+	is.NotNil(res)
+	// Should contain info about kept resources
+	is.Contains(res.Info, "kept due to the resource policy")
+
+	// Verify log contains warning about skipped unowned resource
+	logOutput := logBuffer.String()
+	is.Contains(logOutput, "skipping delete of resource not owned by this release")
+	is.Contains(logOutput, "unowned-deploy")
+}
+
+func TestUninstallRelease_DryRun_OwnershipVerification(t *testing.T) {
+	is := assert.New(t)
+
+	// Create a buffer to capture log output
+	logBuffer := &bytes.Buffer{}
+	handler := slog.NewTextHandler(logBuffer, &slog.HandlerOptions{Level: slog.LevelWarn})
+
+	config := actionConfigFixture(t)
+	config.SetLogger(handler)
+
+	unAction := NewUninstall(config)
+	unAction.DisableHooks = true
+	unAction.DryRun = true
+
+	rel := releaseStub()
+	rel.Name = "dryrun-ownership"
+	rel.Namespace = "default"
+	rel.Manifest = `apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: test-configmap
+  labels:
+    app.kubernetes.io/managed-by: Helm
+  annotations:
+    meta.helm.sh/release-name: dryrun-ownership
+    meta.helm.sh/release-namespace: default
+data:
+  key: value`
+	config.Releases.Create(rel)
+
+	// Create dummy resources - one unowned to test dry-run logging
+	dummyResources := kube.ResourceList{
+		newDeploymentWithOwner("dryrun-unowned-deploy", "default", nil, nil),
+	}
+	failer := config.KubeClient.(*kubefake.FailingKubeClient)
+	failer.DummyResources = dummyResources
+
+	resi, err := unAction.Run(rel.Name)
+	is.NoError(err)
+	is.NotNil(resi)
+	is.NotNil(resi.Release)
+	res, err := releaserToV1Release(resi.Release)
+	is.NoError(err)
+	is.Equal("dryrun-ownership", res.Name)
+
+	// Verify log contains dry-run warning about resources that would be skipped
+	logOutput := logBuffer.String()
+	is.Contains(logOutput, "dry-run: would skip resource")
+	is.Contains(logOutput, "dryrun-unowned-deploy")
+	is.Contains(logOutput, "Deployment")
+}
diff --git a/pkg/action/validate.go b/pkg/action/validate.go
index 1bef5a742..3a9d63253 100644
--- a/pkg/action/validate.go
+++ b/pkg/action/validate.go
@@ -93,6 +93,55 @@ func existingResourceConflict(resources kube.ResourceList, releaseName, releaseN
 	return requireUpdate, err
 }
 
+// verifyOwnershipBeforeDelete checks that resources in the list are owned by the specified release.
+// It returns two lists: owned resources (safe to delete) and unowned resources (to be skipped).
+// Resources that are no longer found on the cluster are skipped entirely (already deleted, nothing left to remove).
+func verifyOwnershipBeforeDelete(resources kube.ResourceList, releaseName, releaseNamespace string) (kube.ResourceList, kube.ResourceList, error) {
+	var owned kube.ResourceList
+	var unowned kube.ResourceList
+
+	err := resources.Visit(func(info *resource.Info, err error) error {
+		if err != nil {
+			return err
+		}
+
+		// If no client is available, ownership cannot be checked; treat the resource as owned (e.g. fake clients in tests)
+		if info.Client == nil {
+			infoCopy := *info
+			owned.Append(&infoCopy)
+			return nil
+		}
+
+		helper := resource.NewHelper(info.Client, info.Mapping)
+		existing, err := helper.Get(info.Namespace, info.Name)
+		if err != nil {
+			if apierrors.IsNotFound(err) {
+				// Resource already deleted, nothing to do
+				return nil
+			}
+			// Cannot fetch the resource (network/permission issue); treat it as unowned so it is skipped rather than deleted
+			infoCopy := *info
+			unowned.Append(&infoCopy)
+			return nil
+		}
+
+		// Verify ownership of the existing resource
+		if err := checkOwnership(existing, releaseName, releaseNamespace); err != nil {
+			// Resource not owned by this release, cannot delete
+			infoCopy := *info
+			unowned.Append(&infoCopy)
+			return nil
+		}
+
+		// Resource is owned by this release, can delete
+		infoCopy := *info
+		owned.Append(&infoCopy)
+		return nil
+	})
+
+	return owned, unowned, err
+}
+
 func checkOwnership(obj runtime.Object, releaseName, releaseNamespace string) error {
 	lbls, err := accessor.Labels(obj)
 	if err != nil {
diff --git a/pkg/action/validate_test.go b/pkg/action/validate_test.go
index 879a5fa4f..d3e0edcde 100644
--- a/pkg/action/validate_test.go
+++ b/pkg/action/validate_test.go
@@ -210,6 +210,98 @@ func TestCheckOwnership(t *testing.T) {
 	assert.EqualError(t, err, `invalid ownership metadata; label validation error: key "app.kubernetes.io/managed-by" must equal "Helm": current value is "helm"`)
 }
 
+func TestVerifyOwnershipBeforeDelete(t *testing.T) {
+	var (
+		releaseName      = "rel-a"
+		releaseNamespace = "ns-a"
+		labels           = map[string]string{
+			appManagedByLabel: appManagedByHelm,
+		}
+		annotations = map[string]string{
+			helmReleaseNameAnnotation:      releaseName,
+			helmReleaseNamespaceAnnotation: releaseNamespace,
+		}
+		wrongAnnotations = map[string]string{
+			helmReleaseNameAnnotation:      "rel-b",
+			helmReleaseNamespaceAnnotation: releaseNamespace,
+		}
+	)
+
+	// Test all resources properly owned
+	t.Run("all resources owned", func(t *testing.T) {
+		owned1 := newDeploymentWithOwner("owned1", "ns-a", labels, annotations)
+		owned2 := newDeploymentWithOwner("owned2", "ns-a", labels, annotations)
+		resources := kube.ResourceList{owned1, owned2}
+
+		ownedList, unownedList, err := verifyOwnershipBeforeDelete(resources, releaseName, releaseNamespace)
+		assert.NoError(t, err)
+		assert.Len(t, ownedList, 2)
+		assert.Len(t, unownedList, 0)
+	})
+
+	// Test mix of owned and unowned resources
+	t.Run("mixed ownership", func(t *testing.T) {
+		owned := newDeploymentWithOwner("owned", "ns-a", labels, annotations)
+		unowned := newDeploymentWithOwner("unowned", "ns-a", labels, wrongAnnotations)
+		resources := kube.ResourceList{owned, unowned}
+
+		ownedList, unownedList, err := verifyOwnershipBeforeDelete(resources, releaseName, releaseNamespace)
+		assert.NoError(t, err)
+		assert.Len(t, ownedList, 1)
+		assert.Len(t, unownedList, 1)
+		assert.Equal(t, "owned", ownedList[0].Name)
+		assert.Equal(t, "unowned", unownedList[0].Name)
+	})
+
+	// Test resource not found (should be skipped - not in either list)
+	t.Run("resource not found", func(t *testing.T) {
+		missing := newMissingDeployment("missing", "ns-a")
+		resources := kube.ResourceList{missing}
+
+		ownedList, unownedList, err := verifyOwnershipBeforeDelete(resources, releaseName, releaseNamespace)
+		assert.NoError(t, err)
+		assert.Len(t, ownedList, 0)
+		assert.Len(t, unownedList, 0)
+	})
+
+	// Test resource with no ownership metadata
+	t.Run("no ownership metadata", func(t *testing.T) {
+		noMeta := newDeploymentWithOwner("no-meta", "ns-a", nil, nil)
+		resources := kube.ResourceList{noMeta}
+
+		ownedList, unownedList, err := verifyOwnershipBeforeDelete(resources, releaseName, releaseNamespace)
+		assert.NoError(t, err)
+		assert.Len(t, ownedList, 0)
+		assert.Len(t, unownedList, 1)
+	})
+
+	// Test resource owned by different release
+	t.Run("owned by different release", func(t *testing.T) {
+		otherRelease := newDeploymentWithOwner("other", "ns-a", labels, wrongAnnotations)
+		resources := kube.ResourceList{otherRelease}
+
+		ownedList, unownedList, err := verifyOwnershipBeforeDelete(resources, releaseName, releaseNamespace)
+		assert.NoError(t, err)
+		assert.Len(t, ownedList, 0)
+		assert.Len(t, unownedList, 1)
+	})
+
+	// Test mixed scenario: owned, unowned, and missing resources
+	t.Run("mixed with missing resources", func(t *testing.T) {
+		owned := newDeploymentWithOwner("owned", "ns-a", labels, annotations)
+		unowned := newDeploymentWithOwner("unowned", "ns-a", labels, wrongAnnotations)
+		missing := newMissingDeployment("missing", "ns-a")
+		resources := kube.ResourceList{owned, unowned, missing}
+
+		ownedList, unownedList, err := verifyOwnershipBeforeDelete(resources, releaseName, releaseNamespace)
+		assert.NoError(t, err)
+		assert.Len(t, ownedList, 1)
+		assert.Len(t, unownedList, 1)
+		assert.Equal(t, "owned", ownedList[0].Name)
+		assert.Equal(t, "unowned", unownedList[0].Name)
+	})
+}
+
 func TestSetMetadataVisitor(t *testing.T) {
 	var (
 		err error