From 443a2a6924fc384e87ff4251e5ac9c077d607f0f Mon Sep 17 00:00:00 2001 From: George Jenkins Date: Fri, 16 Jan 2026 18:51:26 -0800 Subject: [PATCH 1/9] Modernize Helm v3 CONTRIBUTING.md Signed-off-by: George Jenkins --- CONTRIBUTING.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e809e7ca2..83e5a928e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -11,9 +11,13 @@ vulnerability_, please email a report to [cncf-helm-security@lists.cncf.io](mailto:cncf-helm-security@lists.cncf.io). This will give us a chance to try to fix the issue before it is exploited in the wild. -## Helm v3 and v4 +## Helm v3 -Helm v4 is currently under development on the `main` branch. During the development of Helm v4 and for some time after its released, Helm v3 will continue to be supported and developed on the `dev-v3` branch. Helm v3 will continue to get bug fixes and updates for new Kubernetes releases. Helm v4 is where new features and major changes will happen. For features to be backported to Helm v3, an exception will be needed. Bugs should first be fixed on Helm v4 and then backported to Helm v3. +Helm v4 is developed on the `main` branch. Helm v3 on the `dev-v3` branch. + +Helm v3 will continue to get bug fixes and updates for new Kubernetes releases until July 8th 2026. And security fixes until November 11th 2026. See the blog for more details. + +Bugs should first be fixed on Helm v4 and then backported to Helm v3. Helm v3 (and the `dev-v3` branch) is no longer accepting features. ## Sign Your Work From bbec77c1f762c4d92678e7f455951757a2e036a3 Mon Sep 17 00:00:00 2001 From: Matheus Pimenta Date: Wed, 14 Jan 2026 22:42:12 +0000 Subject: [PATCH 2/9] bugfix(kstatus): do not wait forever on failed resources Signed-off-by: Matheus Pimenta --- pkg/kube/client_test.go | 61 ++++++-- pkg/kube/statuswait.go | 53 ++++--- pkg/kube/statuswait_test.go | 282 ++++++++++++++++++++++++++---------- 3 files changed, 288 insertions(+), 108 deletions(-) diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index e257c66d2..7e4cf046c 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -32,9 +32,11 @@ import ( "github.com/fluxcd/cli-utils/pkg/kstatus/polling/event" "github.com/fluxcd/cli-utils/pkg/kstatus/status" "github.com/fluxcd/cli-utils/pkg/object" + "github.com/fluxcd/cli-utils/pkg/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + batchv1 "k8s.io/api/batch/v1" v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -44,8 +46,10 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" jsonserializer "k8s.io/apimachinery/pkg/runtime/serializer/json" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/yaml" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/resource" + dynamicfake "k8s.io/client-go/dynamic/fake" "k8s.io/client-go/kubernetes" k8sfake "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/scheme" @@ -1241,7 +1245,7 @@ func (c createPatchTestCase) run(t *testing.T) { } } -func newTestCustomResourceData(metadata map[string]string, spec map[string]interface{}) *unstructured.Unstructured { +func newTestCustomResourceData(metadata map[string]string, spec map[string]any) *unstructured.Unstructured { if metadata == nil { metadata = make(map[string]string) } @@ -1251,7 +1255,7 @@ func newTestCustomResourceData(metadata map[string]string, spec map[string]inter if _, ok := metadata["namespace"]; !ok { 
metadata["namespace"] = "default" } - o := map[string]interface{}{ + o := map[string]any{ "apiVersion": "crd.com/v1", "kind": "Data", "metadata": metadata, @@ -1274,7 +1278,7 @@ func TestCreatePatchCustomResourceMetadata(t *testing.T) { name: "take ownership of resource", target: target, original: target, - actual: newTestCustomResourceData(nil, map[string]interface{}{ + actual: newTestCustomResourceData(nil, map[string]any{ "color": "red", }), threeWayMergeForUnstructured: true, @@ -1290,7 +1294,7 @@ func TestCreatePatchCustomResourceMetadata(t *testing.T) { } func TestCreatePatchCustomResourceSpec(t *testing.T) { - target := newTestCustomResourceData(nil, map[string]interface{}{ + target := newTestCustomResourceData(nil, map[string]any{ "color": "red", "size": "large", }) @@ -1298,7 +1302,7 @@ func TestCreatePatchCustomResourceSpec(t *testing.T) { name: "merge with spec of existing custom resource", target: target, original: target, - actual: newTestCustomResourceData(nil, map[string]interface{}{ + actual: newTestCustomResourceData(nil, map[string]any{ "color": "red", "weight": "heavy", }), @@ -2275,9 +2279,19 @@ metadata: }, } - var err error - c.Waiter, err = c.GetWaiterWithOptions(StatusWatcherStrategy, WithKStatusReaders(statusReaders...)) - require.NoError(t, err) + // Create a fake dynamic client with the pod resource + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper(v1.SchemeGroupVersion.WithKind("Pod")) + + // Create the pod in the fake client + createManifest(t, podManifest, fakeMapper, fakeClient) + + // Set up the waiter with the fake client and custom status readers + c.Waiter = &statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + readers: statusReaders, + } resources, err := c.Build(strings.NewReader(podManifest), false) require.NoError(t, err) @@ -2307,9 +2321,19 @@ metadata: }, } - var err error - c.Waiter, err = c.GetWaiterWithOptions(StatusWatcherStrategy, WithKStatusReaders(statusReaders...)) - require.NoError(t, err) + // Create a fake dynamic client with the job resource + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper(batchv1.SchemeGroupVersion.WithKind("Job")) + + // Create the job in the fake client + createManifest(t, jobManifest, fakeMapper, fakeClient) + + // Set up the waiter with the fake client and custom status readers + c.Waiter = &statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + readers: statusReaders, + } resources, err := c.Build(strings.NewReader(jobManifest), false) require.NoError(t, err) @@ -2319,3 +2343,18 @@ metadata: err = c.WaitWithJobs(resources, time.Second*3) require.NoError(t, err) } + +func createManifest(t *testing.T, manifest string, + fakeMapper meta.RESTMapper, fakeClient *dynamicfake.FakeDynamicClient) { + t.Helper() + + m := make(map[string]any) + err := yaml.Unmarshal([]byte(manifest), &m) + require.NoError(t, err) + obj := &unstructured.Unstructured{Object: m} + gvk := obj.GroupVersionKind() + mapping, err := fakeMapper.RESTMapping(gvk.GroupKind(), gvk.Version) + require.NoError(t, err) + err = fakeClient.Tracker().Create(mapping.Resource, obj, obj.GetNamespace()) + require.NoError(t, err) +} diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index 75c18de81..bd6e4f93a 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -145,17 +145,19 @@ func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceL return statusCollector.Error } - // Only check 
parent context error, otherwise we would error when desired status is achieved. - if ctx.Err() != nil { - errs := []error{} - for _, id := range resources { - rs := statusCollector.ResourceStatuses[id] - if rs.Status == status.NotFoundStatus { - continue - } - errs = append(errs, fmt.Errorf("resource still exists, name: %s, kind: %s, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) + errs := []error{} + for _, id := range resources { + rs := statusCollector.ResourceStatuses[id] + if rs.Status == status.NotFoundStatus || rs.Status == status.UnknownStatus { + continue } - errs = append(errs, ctx.Err()) + errs = append(errs, fmt.Errorf("resource %s/%s/%s still exists. status: %s, message: %s", + rs.Identifier.GroupKind.Kind, rs.Identifier.Namespace, rs.Identifier.Name, rs.Status, rs.Message)) + } + if err := ctx.Err(); err != nil { + errs = append(errs, err) + } + if len(errs) > 0 { return errors.Join(errs...) } return nil @@ -190,17 +192,19 @@ func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, sw w return statusCollector.Error } - // Only check parent context error, otherwise we would error when desired status is achieved. - if ctx.Err() != nil { - errs := []error{} - for _, id := range resources { - rs := statusCollector.ResourceStatuses[id] - if rs.Status == status.CurrentStatus { - continue - } - errs = append(errs, fmt.Errorf("resource not ready, name: %s, kind: %s, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)) + errs := []error{} + for _, id := range resources { + rs := statusCollector.ResourceStatuses[id] + if rs.Status == status.CurrentStatus { + continue } - errs = append(errs, ctx.Err()) + errs = append(errs, fmt.Errorf("resource %s/%s/%s not ready. status: %s, message: %s", + rs.Identifier.GroupKind.Kind, rs.Identifier.Namespace, rs.Identifier.Name, rs.Status, rs.Message)) + } + if err := ctx.Err(); err != nil { + errs = append(errs, err) + } + if len(errs) > 0 { return errors.Join(errs...) } return nil @@ -225,11 +229,16 @@ func statusObserver(cancel context.CancelFunc, desired status.Status) collector. if rs == nil { continue } - // If a resource is already deleted before waiting has started, it will show as unknown - // this check ensures we don't wait forever for a resource that is already deleted + // If a resource is already deleted before waiting has started, it will show as unknown. + // This check ensures we don't wait forever for a resource that is already deleted. if rs.Status == status.UnknownStatus && desired == status.NotFoundStatus { continue } + // Failed is a terminal state. This check ensures we don't wait forever for a resource + // that has already failed, as intervention is required to resolve the failure. 
+ if rs.Status == status.FailedStatus && desired == status.CurrentStatus { + continue + } rss = append(rss, rs) if rs.Status != desired { nonDesiredResources = append(nonDesiredResources, rs) diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index a4e7ff62c..0d60a526f 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -101,10 +101,27 @@ status: succeeded: 1 active: 0 conditions: - - type: Complete + - type: Complete status: "True" ` +var jobFailedManifest = ` +apiVersion: batch/v1 +kind: Job +metadata: + name: failed-job + namespace: default + generation: 1 +status: + failed: 1 + active: 0 + conditions: + - type: Failed + status: "True" + reason: BackoffLimitExceeded + message: "Job has reached the specified backoff limit" +` + var podCompleteManifest = ` apiVersion: v1 kind: Pod @@ -279,7 +296,7 @@ func TestStatusWaitForDelete(t *testing.T) { name string manifestsToCreate []string manifestsToDelete []string - expectErrs []error + expectErrs []string }{ { name: "wait for pod to be deleted", @@ -291,7 +308,7 @@ func TestStatusWaitForDelete(t *testing.T) { name: "error when not all objects are deleted", manifestsToCreate: []string{jobCompleteManifest, podCurrentManifest}, manifestsToDelete: []string{jobCompleteManifest}, - expectErrs: []error{errors.New("resource still exists, name: current-pod, kind: Pod, status: Current"), errors.New("context deadline exceeded")}, + expectErrs: []string{"resource Pod/ns/current-pod still exists. status: Current", "context deadline exceeded"}, }, } for _, tt := range tests { @@ -329,7 +346,10 @@ func TestStatusWaitForDelete(t *testing.T) { resourceList := getResourceListFromRuntimeObjs(t, c, objsToCreate) err := statusWaiter.WaitForDelete(resourceList, timeout) if tt.expectErrs != nil { - assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) + require.Error(t, err) + for _, expectedErrStr := range tt.expectErrs { + assert.Contains(t, err.Error(), expectedErrStr) + } return } assert.NoError(t, err) @@ -359,37 +379,35 @@ func TestStatusWaitForDeleteNonExistentObject(t *testing.T) { func TestStatusWait(t *testing.T) { t.Parallel() tests := []struct { - name string - objManifests []string - expectErrs []error - waitForJobs bool + name string + objManifests []string + expectErrStrs []string + waitForJobs bool }{ { - name: "Job is not complete", - objManifests: []string{jobNoStatusManifest}, - expectErrs: []error{errors.New("resource not ready, name: test, kind: Job, status: InProgress"), errors.New("context deadline exceeded")}, - waitForJobs: true, + name: "Job is not complete", + objManifests: []string{jobNoStatusManifest}, + expectErrStrs: []string{"resource Job/qual/test not ready. 
status: InProgress", "context deadline exceeded"}, + waitForJobs: true, }, { - name: "Job is ready but not complete", - objManifests: []string{jobReadyManifest}, - expectErrs: nil, - waitForJobs: false, + name: "Job is ready but not complete", + objManifests: []string{jobReadyManifest}, + expectErrStrs: nil, + waitForJobs: false, }, { name: "Pod is ready", objManifests: []string{podCurrentManifest}, - expectErrs: nil, }, { - name: "one of the pods never becomes ready", - objManifests: []string{podNoStatusManifest, podCurrentManifest}, - expectErrs: []error{errors.New("resource not ready, name: in-progress-pod, kind: Pod, status: InProgress"), errors.New("context deadline exceeded")}, + name: "one of the pods never becomes ready", + objManifests: []string{podNoStatusManifest, podCurrentManifest}, + expectErrStrs: []string{"resource Pod/ns/in-progress-pod not ready. status: InProgress", "context deadline exceeded"}, }, { name: "paused deployment passes", objManifests: []string{pausedDeploymentManifest}, - expectErrs: nil, }, } @@ -416,8 +434,11 @@ func TestStatusWait(t *testing.T) { } resourceList := getResourceListFromRuntimeObjs(t, c, objs) err := statusWaiter.Wait(resourceList, time.Second*3) - if tt.expectErrs != nil { - assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) + if tt.expectErrStrs != nil { + require.Error(t, err) + for _, expectedErrStr := range tt.expectErrStrs { + assert.Contains(t, err.Error(), expectedErrStr) + } return } assert.NoError(t, err) @@ -428,23 +449,23 @@ func TestStatusWait(t *testing.T) { func TestWaitForJobComplete(t *testing.T) { t.Parallel() tests := []struct { - name string - objManifests []string - expectErrs []error + name string + objManifests []string + expectErrStrs []string }{ { name: "Job is complete", objManifests: []string{jobCompleteManifest}, }, { - name: "Job is not ready", - objManifests: []string{jobNoStatusManifest}, - expectErrs: []error{errors.New("resource not ready, name: test, kind: Job, status: InProgress"), errors.New("context deadline exceeded")}, + name: "Job is not ready", + objManifests: []string{jobNoStatusManifest}, + expectErrStrs: []string{"resource Job/qual/test not ready. status: InProgress", "context deadline exceeded"}, }, { - name: "Job is ready but not complete", - objManifests: []string{jobReadyManifest}, - expectErrs: []error{errors.New("resource not ready, name: ready-not-complete, kind: Job, status: InProgress"), errors.New("context deadline exceeded")}, + name: "Job is ready but not complete", + objManifests: []string{jobReadyManifest}, + expectErrStrs: []string{"resource Job/default/ready-not-complete not ready. 
status: InProgress", "context deadline exceeded"}, }, } @@ -469,8 +490,11 @@ func TestWaitForJobComplete(t *testing.T) { } resourceList := getResourceListFromRuntimeObjs(t, c, objs) err := statusWaiter.WaitWithJobs(resourceList, time.Second*3) - if tt.expectErrs != nil { - assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) + if tt.expectErrStrs != nil { + require.Error(t, err) + for _, expectedErrStr := range tt.expectErrStrs { + assert.Contains(t, err.Error(), expectedErrStr) + } return } assert.NoError(t, err) @@ -481,9 +505,9 @@ func TestWaitForJobComplete(t *testing.T) { func TestWatchForReady(t *testing.T) { t.Parallel() tests := []struct { - name string - objManifests []string - expectErrs []error + name string + objManifests []string + expectErrStrs []string }{ { name: "succeeds if pod and job are complete", @@ -494,14 +518,14 @@ func TestWatchForReady(t *testing.T) { objManifests: []string{notReadyDeploymentManifest}, }, { - name: "Fails if job is not complete", - objManifests: []string{jobReadyManifest}, - expectErrs: []error{errors.New("resource not ready, name: ready-not-complete, kind: Job, status: InProgress"), errors.New("context deadline exceeded")}, + name: "Fails if job is not complete", + objManifests: []string{jobReadyManifest}, + expectErrStrs: []string{"resource Job/default/ready-not-complete not ready. status: InProgress", "context deadline exceeded"}, }, { - name: "Fails if pod is not complete", - objManifests: []string{podCurrentManifest}, - expectErrs: []error{errors.New("resource not ready, name: current-pod, kind: Pod, status: InProgress"), errors.New("context deadline exceeded")}, + name: "Fails if pod is not complete", + objManifests: []string{podCurrentManifest}, + expectErrStrs: []string{"resource Pod/ns/current-pod not ready. status: InProgress", "context deadline exceeded"}, }, } @@ -528,8 +552,11 @@ func TestWatchForReady(t *testing.T) { } resourceList := getResourceListFromRuntimeObjs(t, c, objs) err := statusWaiter.WatchUntilReady(resourceList, time.Second*3) - if tt.expectErrs != nil { - assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) + if tt.expectErrStrs != nil { + require.Error(t, err) + for _, expectedErrStr := range tt.expectErrStrs { + assert.Contains(t, err.Error(), expectedErrStr) + } return } assert.NoError(t, err) @@ -540,10 +567,10 @@ func TestWatchForReady(t *testing.T) { func TestStatusWaitMultipleNamespaces(t *testing.T) { t.Parallel() tests := []struct { - name string - objManifests []string - expectErrs []error - testFunc func(statusWaiter, ResourceList, time.Duration) error + name string + objManifests []string + expectErrStrs []string + testFunc func(statusWaiter, ResourceList, time.Duration) error }{ { name: "pods in multiple namespaces", @@ -560,9 +587,9 @@ func TestStatusWaitMultipleNamespaces(t *testing.T) { }, }, { - name: "error when resource not ready in one namespace", - objManifests: []string{podNamespace1NoStatusManifest, podNamespace2Manifest}, - expectErrs: []error{errors.New("resource not ready, name: pod-ns1, kind: Pod, status: InProgress"), errors.New("context deadline exceeded")}, + name: "error when resource not ready in one namespace", + objManifests: []string{podNamespace1NoStatusManifest, podNamespace2Manifest}, + expectErrStrs: []string{"resource Pod/namespace-1/pod-ns1 not ready. 
status: InProgress", "context deadline exceeded"}, testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.Wait(rl, timeout) }, @@ -642,8 +669,11 @@ func TestStatusWaitMultipleNamespaces(t *testing.T) { resourceList := getResourceListFromRuntimeObjs(t, c, objs) err := tt.testFunc(sw, resourceList, time.Second*3) - if tt.expectErrs != nil { - assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) + if tt.expectErrStrs != nil { + require.Error(t, err) + for _, expectedErrStr := range tt.expectErrStrs { + assert.Contains(t, err.Error(), expectedErrStr) + } return } assert.NoError(t, err) @@ -978,10 +1008,10 @@ func (m *mockStatusReader) ReadStatusForObject(_ context.Context, _ engine.Clust func TestStatusWaitWithCustomReaders(t *testing.T) { t.Parallel() tests := []struct { - name string - objManifests []string - customReader *mockStatusReader - expectErrs []error + name string + objManifests []string + customReader *mockStatusReader + expectErrStrs []string }{ { name: "custom reader makes pod immediately current", @@ -990,7 +1020,6 @@ func TestStatusWaitWithCustomReaders(t *testing.T) { supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(), status: status.CurrentStatus, }, - expectErrs: nil, }, { name: "custom reader returns in-progress status", @@ -999,7 +1028,7 @@ func TestStatusWaitWithCustomReaders(t *testing.T) { supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(), status: status.InProgressStatus, }, - expectErrs: []error{errors.New("resource not ready, name: current-pod, kind: Pod, status: InProgress"), errors.New("context deadline exceeded")}, + expectErrStrs: []string{"resource Pod/ns/current-pod not ready. status: InProgress", "context deadline exceeded"}, }, { name: "custom reader for different resource type is not used", @@ -1008,7 +1037,6 @@ func TestStatusWaitWithCustomReaders(t *testing.T) { supportedGK: batchv1.SchemeGroupVersion.WithKind("Job").GroupKind(), status: status.InProgressStatus, }, - expectErrs: nil, }, } @@ -1035,8 +1063,11 @@ func TestStatusWaitWithCustomReaders(t *testing.T) { } resourceList := getResourceListFromRuntimeObjs(t, c, objs) err := statusWaiter.Wait(resourceList, time.Second*3) - if tt.expectErrs != nil { - assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) + if tt.expectErrStrs != nil { + require.Error(t, err) + for _, expectedErrStr := range tt.expectErrStrs { + assert.Contains(t, err.Error(), expectedErrStr) + } return } assert.NoError(t, err) @@ -1113,13 +1144,115 @@ func TestStatusWaitWithJobsAndCustomReaders(t *testing.T) { } } +func TestStatusWaitWithFailedResources(t *testing.T) { + t.Parallel() + tests := []struct { + name string + objManifests []string + customReader *mockStatusReader + expectErrStrs []string + testFunc func(statusWaiter, ResourceList, time.Duration) error + }{ + { + name: "Wait returns error when resource has failed", + objManifests: []string{podNoStatusManifest}, + customReader: &mockStatusReader{ + supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(), + status: status.FailedStatus, + }, + expectErrStrs: []string{"resource Pod/ns/in-progress-pod not ready. 
status: Failed, message: mock status reader"}, + testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + return sw.Wait(rl, timeout) + }, + }, + { + name: "WaitWithJobs returns error when job has failed", + objManifests: []string{jobFailedManifest}, + customReader: nil, // Use the built-in job status reader + expectErrStrs: []string{ + "resource Job/default/failed-job not ready. status: Failed", + }, + testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + return sw.WaitWithJobs(rl, timeout) + }, + }, + { + name: "Wait returns errors when multiple resources fail", + objManifests: []string{podNoStatusManifest, podCurrentManifest}, + customReader: &mockStatusReader{ + supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(), + status: status.FailedStatus, + }, + // The mock reader will make both pods return FailedStatus + expectErrStrs: []string{ + "resource Pod/ns/in-progress-pod not ready. status: Failed, message: mock status reader", + "resource Pod/ns/current-pod not ready. status: Failed, message: mock status reader", + }, + testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + return sw.Wait(rl, timeout) + }, + }, + { + name: "WatchUntilReady returns error when resource has failed", + objManifests: []string{podNoStatusManifest}, + customReader: &mockStatusReader{ + supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(), + status: status.FailedStatus, + }, + // WatchUntilReady also waits for CurrentStatus, so failed resources should return error + expectErrStrs: []string{"resource Pod/ns/in-progress-pod not ready. status: Failed, message: mock status reader"}, + testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + return sw.WatchUntilReady(rl, timeout) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + batchv1.SchemeGroupVersion.WithKind("Job"), + ) + var readers []engine.StatusReader + if tt.customReader != nil { + readers = []engine.StatusReader{tt.customReader} + } + sw := statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + readers: readers, + } + objs := getRuntimeObjFromManifests(t, tt.objManifests) + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) + assert.NoError(t, err) + } + resourceList := getResourceListFromRuntimeObjs(t, c, objs) + err := tt.testFunc(sw, resourceList, time.Second*3) + if tt.expectErrStrs != nil { + require.Error(t, err) + for _, expectedErrStr := range tt.expectErrStrs { + assert.Contains(t, err.Error(), expectedErrStr) + } + return + } + assert.NoError(t, err) + }) + } +} + func TestWatchUntilReadyWithCustomReaders(t *testing.T) { t.Parallel() tests := []struct { - name string - objManifests []string - customReader *mockStatusReader - expectErrs []error + name string + objManifests []string + customReader *mockStatusReader + expectErrStrs []string }{ { name: "custom reader makes job immediately current for hooks", @@ -1128,7 +1261,6 @@ func TestWatchUntilReadyWithCustomReaders(t *testing.T) { supportedGK: batchv1.SchemeGroupVersion.WithKind("Job").GroupKind(), status: status.CurrentStatus, }, - expectErrs: nil, }, { name: "custom reader makes pod immediately current for hooks", @@ -1137,7 
+1269,6 @@ func TestWatchUntilReadyWithCustomReaders(t *testing.T) { supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(), status: status.CurrentStatus, }, - expectErrs: nil, }, { name: "custom reader takes precedence over built-in pod reader", @@ -1146,7 +1277,7 @@ func TestWatchUntilReadyWithCustomReaders(t *testing.T) { supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(), status: status.InProgressStatus, }, - expectErrs: []error{errors.New("resource not ready, name: good-pod, kind: Pod, status: InProgress"), errors.New("context deadline exceeded")}, + expectErrStrs: []string{"resource Pod/ns/good-pod not ready. status: InProgress", "context deadline exceeded"}, }, { name: "custom reader takes precedence over built-in job reader", @@ -1155,7 +1286,7 @@ func TestWatchUntilReadyWithCustomReaders(t *testing.T) { supportedGK: batchv1.SchemeGroupVersion.WithKind("Job").GroupKind(), status: status.InProgressStatus, }, - expectErrs: []error{errors.New("resource not ready, name: test, kind: Job, status: InProgress"), errors.New("context deadline exceeded")}, + expectErrStrs: []string{"resource Job/qual/test not ready. status: InProgress", "context deadline exceeded"}, }, { name: "custom reader for different resource type does not affect pods", @@ -1164,7 +1295,6 @@ func TestWatchUntilReadyWithCustomReaders(t *testing.T) { supportedGK: batchv1.SchemeGroupVersion.WithKind("Job").GroupKind(), status: status.InProgressStatus, }, - expectErrs: nil, }, { name: "built-in readers still work when custom reader does not match", @@ -1173,7 +1303,6 @@ func TestWatchUntilReadyWithCustomReaders(t *testing.T) { supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(), status: status.InProgressStatus, }, - expectErrs: nil, }, } @@ -1200,8 +1329,11 @@ func TestWatchUntilReadyWithCustomReaders(t *testing.T) { } resourceList := getResourceListFromRuntimeObjs(t, c, objs) err := statusWaiter.WatchUntilReady(resourceList, time.Second*3) - if tt.expectErrs != nil { - assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) + if tt.expectErrStrs != nil { + require.Error(t, err) + for _, expectedErrStr := range tt.expectErrStrs { + assert.Contains(t, err.Error(), expectedErrStr) + } return } assert.NoError(t, err) From e70d59de7cd7ea2a501d809b3245ebfa0412e0ec Mon Sep 17 00:00:00 2001 From: Evans Mungai Date: Mon, 19 Jan 2026 16:42:04 +0000 Subject: [PATCH 3/9] docs: document uninstall using cascade foreground flag Signed-off-by: Evans Mungai --- pkg/cmd/uninstall.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pkg/cmd/uninstall.go b/pkg/cmd/uninstall.go index 4cc14ae1e..49f7bd19d 100644 --- a/pkg/cmd/uninstall.go +++ b/pkg/cmd/uninstall.go @@ -35,6 +35,9 @@ as well as the release history, freeing it up for future use. Use the '--dry-run' flag to see which releases will be uninstalled without actually uninstalling them. + +Use '--cascade foreground' with '--wait' to ensure resources with finalizers +are fully deleted before the command returns. 
` func newUninstallCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { @@ -76,7 +79,7 @@ func newUninstallCmd(cfg *action.Configuration, out io.Writer) *cobra.Command { f.BoolVar(&client.DisableHooks, "no-hooks", false, "prevent hooks from running during uninstallation") f.BoolVar(&client.IgnoreNotFound, "ignore-not-found", false, `Treat "release not found" as a successful uninstall`) f.BoolVar(&client.KeepHistory, "keep-history", false, "remove all associated resources and mark the release as deleted, but retain the release history") - f.StringVar(&client.DeletionPropagation, "cascade", "background", "Must be \"background\", \"orphan\", or \"foreground\". Selects the deletion cascading strategy for the dependents. Defaults to background.") + f.StringVar(&client.DeletionPropagation, "cascade", "background", "Must be \"background\", \"orphan\", or \"foreground\". Selects the deletion cascading strategy for the dependents. Defaults to background. Use \"foreground\" with --wait to ensure resources with finalizers are fully deleted before returning.") f.DurationVar(&client.Timeout, "timeout", 300*time.Second, "time to wait for any individual Kubernetes operation (like Jobs for hooks)") f.StringVar(&client.Description, "description", "", "add a custom description") AddWaitFlag(cmd, &client.WaitStrategy) From 52620076e21ad6afd0f48df6772001b1466c966b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Jan 2026 22:34:25 +0000 Subject: [PATCH 4/9] chore(deps): bump sigs.k8s.io/controller-runtime from 0.22.4 to 0.23.0 Bumps [sigs.k8s.io/controller-runtime](https://github.com/kubernetes-sigs/controller-runtime) from 0.22.4 to 0.23.0. - [Release notes](https://github.com/kubernetes-sigs/controller-runtime/releases) - [Changelog](https://github.com/kubernetes-sigs/controller-runtime/blob/main/RELEASE.md) - [Commits](https://github.com/kubernetes-sigs/controller-runtime/compare/v0.22.4...v0.23.0) --- updated-dependencies: - dependency-name: sigs.k8s.io/controller-runtime dependency-version: 0.23.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 6e2d9c15d..4b0bb8977 100644 --- a/go.mod +++ b/go.mod @@ -48,7 +48,7 @@ require ( k8s.io/klog/v2 v2.130.1 k8s.io/kubectl v0.35.0 oras.land/oras-go/v2 v2.6.0 - sigs.k8s.io/controller-runtime v0.22.4 + sigs.k8s.io/controller-runtime v0.23.0 sigs.k8s.io/kustomize/kyaml v0.21.0 sigs.k8s.io/yaml v1.6.0 ) diff --git a/go.sum b/go.sum index b1e843f1b..6e010e4b2 100644 --- a/go.sum +++ b/go.sum @@ -502,8 +502,8 @@ k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzk k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc= oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o= -sigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327UfMq9A= -sigs.k8s.io/controller-runtime v0.22.4/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8= +sigs.k8s.io/controller-runtime v0.23.0 h1:Ubi7klJWiwEWqDY+odSVZiFA0aDSevOCXpa38yCSYu8= +sigs.k8s.io/controller-runtime v0.23.0/go.mod h1:DBOIr9NsprUqCZ1ZhsuJ0wAnQSIxY/C6VjZbmLgw0j0= sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/kustomize/api v0.20.1 h1:iWP1Ydh3/lmldBnH/S5RXgT98vWYMaTUL1ADcr+Sv7I= From 97fd00786f16ebc3b68164bff7133592f19f70b6 Mon Sep 17 00:00:00 2001 From: Jeevan Yewale Date: Mon, 19 Jan 2026 12:40:51 +0530 Subject: [PATCH 5/9] Remove legacy sync-repo.sh script Signed-off-by: Jeevan Yewale --- scripts/sync-repo.sh | 83 -------------------------------------------- 1 file changed, 83 deletions(-) delete mode 100755 scripts/sync-repo.sh diff --git a/scripts/sync-repo.sh b/scripts/sync-repo.sh deleted file mode 100755 index 0651d3634..000000000 --- a/scripts/sync-repo.sh +++ /dev/null @@ -1,83 +0,0 @@ -#!/usr/bin/env bash - -# Copyright The Helm Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Bash 'Strict Mode' -# http://redsymbol.net/articles/unofficial-bash-strict-mode -set -euo pipefail -IFS=$'\n\t' - -# Helper Functions ------------------------------------------------------------- - -# Display error message and exit -error_exit() { - echo "error: ${1:-"unknown error"}" 1>&2 - exit 1 -} - -# Checks if a command exists. Returns 1 or 0 -command_exists() { - hash "${1}" 2>/dev/null -} - -# Program Functions ------------------------------------------------------------ - -verify_prereqs() { - echo "Verifying Prerequisites...." - if command_exists gsutil; then - echo "Thumbs up! Looks like you have gsutil. Let's continue." - else - error_exit "Couldn't find gsutil. Bailing out." 
- fi
-}
-
-confirm() {
- # shellcheck disable=SC2154
- case $response in
- [yY][eE][sS]|[yY])
- true
- ;;
- *)
- false
- ;;
- esac
-}
-
-# Main -------------------------------------------------------------------------
-
-main() {
- if [ "$#" -ne 2 ]; then
- error_exit "Illegal number of parameters. You must pass in local directory path and a GCS bucket name"
- fi
-
- echo "Getting ready to sync your local directory ($1) to a remote repository at gs://$2"
-
- verify_prereqs
-
- # dry run of the command
- gsutil rsync -d -n "$1" gs://"$2"
-
- read -r -p "Are you sure you would like to continue with these changes? [y/N] " confirm
- if [[ $confirm =~ [yY](es)* ]]; then
- gsutil rsync -d "$1" gs://"$2"
- else
- error_exit "Discontinuing sync process."
- fi
-
- echo "Your remote chart repository now matches the contents of the $1 directory!"
-
-}
-
-main "${@:-}"

From 26e28e846af1ceaf63e16c4f2e52bbfbab411ba1 Mon Sep 17 00:00:00 2001
From: George Jenkins
Date: Tue, 20 Jan 2026 16:24:21 -0800
Subject: [PATCH 6/9] Apply suggestions from code review

Co-authored-by: Andrew Block
Signed-off-by: George Jenkins
---
 CONTRIBUTING.md | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 83e5a928e..0b704aa9a 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -13,11 +13,11 @@ chance to try to fix the issue before it is exploited in the wild.
 
 ## Helm v3
 
-Helm v4 is developed on the `main` branch. Helm v3 on the `dev-v3` branch.
+Helm v4 development takes place on the `main` branch while Helm v3 is on the `dev-v3` branch.
 
-Helm v3 will continue to get bug fixes and updates for new Kubernetes releases until July 8th 2026. And security fixes until November 11th 2026. See the blog for more details.
+Helm v3 will continue to receive bug fixes and updates for new Kubernetes releases until July 8th 2026. Security fixes will still be applied until November 11th 2026. See the blog for more details.
 
-Bugs should first be fixed on Helm v4 and then backported to Helm v3. Helm v3 (and the `dev-v3` branch) is no longer accepting features.
+Bugs should first be fixed on Helm v4 and then backported to Helm v3. Helm v3 (and the `dev-v3` branch) is no longer accepting new features.
## Sign Your Work From b0b35f1231b0b885b1624c5586938cfa69d30995 Mon Sep 17 00:00:00 2001 From: Matheus Pimenta Date: Thu, 15 Jan 2026 14:53:15 +0000 Subject: [PATCH 7/9] feat(kstatus): fine-grained context options for waiting Signed-off-by: Matheus Pimenta --- pkg/kube/client.go | 12 +- pkg/kube/options.go | 41 +++- pkg/kube/statuswait.go | 27 ++- pkg/kube/statuswait_test.go | 469 ++++++++++++++++++++++++++++++++++++ 4 files changed, 533 insertions(+), 16 deletions(-) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index 9af4bbcb3..ad8f851d1 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -167,10 +167,14 @@ func (c *Client) newStatusWatcher(opts ...WaitOption) (*statusWaiter, error) { waitContext = c.WaitContext } return &statusWaiter{ - restMapper: restMapper, - client: dynamicClient, - ctx: waitContext, - readers: o.statusReaders, + restMapper: restMapper, + client: dynamicClient, + ctx: waitContext, + watchUntilReadyCtx: o.watchUntilReadyCtx, + waitCtx: o.waitCtx, + waitWithJobsCtx: o.waitWithJobsCtx, + waitForDeleteCtx: o.waitForDeleteCtx, + readers: o.statusReaders, }, nil } diff --git a/pkg/kube/options.go b/pkg/kube/options.go index 49c6229ba..3326c284b 100644 --- a/pkg/kube/options.go +++ b/pkg/kube/options.go @@ -26,12 +26,45 @@ import ( type WaitOption func(*waitOptions) // WithWaitContext sets the context for waiting on resources. +// If unset, context.Background() will be used. func WithWaitContext(ctx context.Context) WaitOption { return func(wo *waitOptions) { wo.ctx = ctx } } +// WithWatchUntilReadyMethodContext sets the context specifically for the WatchUntilReady method. +// If unset, the context set by `WithWaitContext` will be used (falling back to `context.Background()`). +func WithWatchUntilReadyMethodContext(ctx context.Context) WaitOption { + return func(wo *waitOptions) { + wo.watchUntilReadyCtx = ctx + } +} + +// WithWaitMethodContext sets the context specifically for the Wait method. +// If unset, the context set by `WithWaitContext` will be used (falling back to `context.Background()`). +func WithWaitMethodContext(ctx context.Context) WaitOption { + return func(wo *waitOptions) { + wo.waitCtx = ctx + } +} + +// WithWaitWithJobsMethodContext sets the context specifically for the WaitWithJobs method. +// If unset, the context set by `WithWaitContext` will be used (falling back to `context.Background()`). +func WithWaitWithJobsMethodContext(ctx context.Context) WaitOption { + return func(wo *waitOptions) { + wo.waitWithJobsCtx = ctx + } +} + +// WithWaitForDeleteMethodContext sets the context specifically for the WaitForDelete method. +// If unset, the context set by `WithWaitContext` will be used (falling back to `context.Background()`). +func WithWaitForDeleteMethodContext(ctx context.Context) WaitOption { + return func(wo *waitOptions) { + wo.waitForDeleteCtx = ctx + } +} + // WithKStatusReaders sets the status readers to be used while waiting on resources. 
func WithKStatusReaders(readers ...engine.StatusReader) WaitOption { return func(wo *waitOptions) { @@ -40,6 +73,10 @@ func WithKStatusReaders(readers ...engine.StatusReader) WaitOption { } type waitOptions struct { - ctx context.Context - statusReaders []engine.StatusReader + ctx context.Context + watchUntilReadyCtx context.Context + waitCtx context.Context + waitWithJobsCtx context.Context + waitForDeleteCtx context.Context + statusReaders []engine.StatusReader } diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index bd6e4f93a..84819492b 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -42,10 +42,14 @@ import ( ) type statusWaiter struct { - client dynamic.Interface - restMapper meta.RESTMapper - ctx context.Context - readers []engine.StatusReader + client dynamic.Interface + restMapper meta.RESTMapper + ctx context.Context + watchUntilReadyCtx context.Context + waitCtx context.Context + waitWithJobsCtx context.Context + waitForDeleteCtx context.Context + readers []engine.StatusReader } // DefaultStatusWatcherTimeout is the timeout used by the status waiter when a @@ -66,7 +70,7 @@ func (w *statusWaiter) WatchUntilReady(resourceList ResourceList, timeout time.D if timeout == 0 { timeout = DefaultStatusWatcherTimeout } - ctx, cancel := w.contextWithTimeout(timeout) + ctx, cancel := w.contextWithTimeout(w.watchUntilReadyCtx, timeout) defer cancel() slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout) sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) @@ -88,7 +92,7 @@ func (w *statusWaiter) Wait(resourceList ResourceList, timeout time.Duration) er if timeout == 0 { timeout = DefaultStatusWatcherTimeout } - ctx, cancel := w.contextWithTimeout(timeout) + ctx, cancel := w.contextWithTimeout(w.waitCtx, timeout) defer cancel() slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout) sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) @@ -100,7 +104,7 @@ func (w *statusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Dura if timeout == 0 { timeout = DefaultStatusWatcherTimeout } - ctx, cancel := w.contextWithTimeout(timeout) + ctx, cancel := w.contextWithTimeout(w.waitWithJobsCtx, timeout) defer cancel() slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout) sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) @@ -116,7 +120,7 @@ func (w *statusWaiter) WaitForDelete(resourceList ResourceList, timeout time.Dur if timeout == 0 { timeout = DefaultStatusWatcherTimeout } - ctx, cancel := w.contextWithTimeout(timeout) + ctx, cancel := w.contextWithTimeout(w.waitForDeleteCtx, timeout) defer cancel() slog.Debug("waiting for resources to be deleted", "count", len(resourceList), "timeout", timeout) sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) @@ -210,8 +214,11 @@ func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, sw w return nil } -func (w *statusWaiter) contextWithTimeout(timeout time.Duration) (context.Context, context.CancelFunc) { - return contextWithTimeout(w.ctx, timeout) +func (w *statusWaiter) contextWithTimeout(methodCtx context.Context, timeout time.Duration) (context.Context, context.CancelFunc) { + if methodCtx == nil { + methodCtx = w.ctx + } + return contextWithTimeout(methodCtx, timeout) } func contextWithTimeout(ctx context.Context, timeout time.Duration) (context.Context, context.CancelFunc) { diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index 0d60a526f..83aaa357a 
100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -1246,6 +1246,475 @@ func TestStatusWaitWithFailedResources(t *testing.T) { } } +func TestWaitOptionFunctions(t *testing.T) { + t.Parallel() + + t.Run("WithWatchUntilReadyMethodContext sets watchUntilReadyCtx", func(t *testing.T) { + t.Parallel() + type contextKey struct{} + ctx := context.WithValue(context.Background(), contextKey{}, "test") + opts := &waitOptions{} + WithWatchUntilReadyMethodContext(ctx)(opts) + assert.Equal(t, ctx, opts.watchUntilReadyCtx) + }) + + t.Run("WithWaitMethodContext sets waitCtx", func(t *testing.T) { + t.Parallel() + type contextKey struct{} + ctx := context.WithValue(context.Background(), contextKey{}, "test") + opts := &waitOptions{} + WithWaitMethodContext(ctx)(opts) + assert.Equal(t, ctx, opts.waitCtx) + }) + + t.Run("WithWaitWithJobsMethodContext sets waitWithJobsCtx", func(t *testing.T) { + t.Parallel() + type contextKey struct{} + ctx := context.WithValue(context.Background(), contextKey{}, "test") + opts := &waitOptions{} + WithWaitWithJobsMethodContext(ctx)(opts) + assert.Equal(t, ctx, opts.waitWithJobsCtx) + }) + + t.Run("WithWaitForDeleteMethodContext sets waitForDeleteCtx", func(t *testing.T) { + t.Parallel() + type contextKey struct{} + ctx := context.WithValue(context.Background(), contextKey{}, "test") + opts := &waitOptions{} + WithWaitForDeleteMethodContext(ctx)(opts) + assert.Equal(t, ctx, opts.waitForDeleteCtx) + }) +} + +func TestMethodSpecificContextCancellation(t *testing.T) { + t.Parallel() + + t.Run("WatchUntilReady uses method-specific context", func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + ) + + // Create a cancelled method-specific context + methodCtx, methodCancel := context.WithCancel(context.Background()) + methodCancel() // Cancel immediately + + sw := statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + ctx: context.Background(), // General context is not cancelled + watchUntilReadyCtx: methodCtx, // Method context is cancelled + } + + objs := getRuntimeObjFromManifests(t, []string{podCompleteManifest}) + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) + require.NoError(t, err) + } + resourceList := getResourceListFromRuntimeObjs(t, c, objs) + + err := sw.WatchUntilReady(resourceList, time.Second*3) + // Should fail due to cancelled method context + require.Error(t, err) + assert.Contains(t, err.Error(), "context canceled") + }) + + t.Run("Wait uses method-specific context", func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + ) + + // Create a cancelled method-specific context + methodCtx, methodCancel := context.WithCancel(context.Background()) + methodCancel() // Cancel immediately + + sw := statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + ctx: context.Background(), // General context is not cancelled + waitCtx: methodCtx, // Method context is cancelled + } + + objs := getRuntimeObjFromManifests(t, []string{podCurrentManifest}) + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, 
u.GetNamespace()) + require.NoError(t, err) + } + resourceList := getResourceListFromRuntimeObjs(t, c, objs) + + err := sw.Wait(resourceList, time.Second*3) + // Should fail due to cancelled method context + require.Error(t, err) + assert.Contains(t, err.Error(), "context canceled") + }) + + t.Run("WaitWithJobs uses method-specific context", func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + batchv1.SchemeGroupVersion.WithKind("Job"), + ) + + // Create a cancelled method-specific context + methodCtx, methodCancel := context.WithCancel(context.Background()) + methodCancel() // Cancel immediately + + sw := statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + ctx: context.Background(), // General context is not cancelled + waitWithJobsCtx: methodCtx, // Method context is cancelled + } + + objs := getRuntimeObjFromManifests(t, []string{jobCompleteManifest}) + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) + require.NoError(t, err) + } + resourceList := getResourceListFromRuntimeObjs(t, c, objs) + + err := sw.WaitWithJobs(resourceList, time.Second*3) + // Should fail due to cancelled method context + require.Error(t, err) + assert.Contains(t, err.Error(), "context canceled") + }) + + t.Run("WaitForDelete uses method-specific context", func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + ) + + // Create a cancelled method-specific context + methodCtx, methodCancel := context.WithCancel(context.Background()) + methodCancel() // Cancel immediately + + sw := statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + ctx: context.Background(), // General context is not cancelled + waitForDeleteCtx: methodCtx, // Method context is cancelled + } + + objs := getRuntimeObjFromManifests(t, []string{podCurrentManifest}) + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) + require.NoError(t, err) + } + resourceList := getResourceListFromRuntimeObjs(t, c, objs) + + err := sw.WaitForDelete(resourceList, time.Second*3) + // Should fail due to cancelled method context + require.Error(t, err) + assert.Contains(t, err.Error(), "context canceled") + }) +} + +func TestMethodContextFallbackToGeneralContext(t *testing.T) { + t.Parallel() + + t.Run("WatchUntilReady falls back to general context when method context is nil", func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + ) + + // Create a cancelled general context + generalCtx, generalCancel := context.WithCancel(context.Background()) + generalCancel() // Cancel immediately + + sw := statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + ctx: generalCtx, // General context is cancelled + watchUntilReadyCtx: nil, // Method context is nil, should fall back + } + + objs := getRuntimeObjFromManifests(t, []string{podCompleteManifest}) + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, 
u.GetNamespace()) + require.NoError(t, err) + } + resourceList := getResourceListFromRuntimeObjs(t, c, objs) + + err := sw.WatchUntilReady(resourceList, time.Second*3) + // Should fail due to cancelled general context + require.Error(t, err) + assert.Contains(t, err.Error(), "context canceled") + }) + + t.Run("Wait falls back to general context when method context is nil", func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + ) + + // Create a cancelled general context + generalCtx, generalCancel := context.WithCancel(context.Background()) + generalCancel() // Cancel immediately + + sw := statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + ctx: generalCtx, // General context is cancelled + waitCtx: nil, // Method context is nil, should fall back + } + + objs := getRuntimeObjFromManifests(t, []string{podCurrentManifest}) + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) + require.NoError(t, err) + } + resourceList := getResourceListFromRuntimeObjs(t, c, objs) + + err := sw.Wait(resourceList, time.Second*3) + // Should fail due to cancelled general context + require.Error(t, err) + assert.Contains(t, err.Error(), "context canceled") + }) + + t.Run("WaitWithJobs falls back to general context when method context is nil", func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + batchv1.SchemeGroupVersion.WithKind("Job"), + ) + + // Create a cancelled general context + generalCtx, generalCancel := context.WithCancel(context.Background()) + generalCancel() // Cancel immediately + + sw := statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + ctx: generalCtx, // General context is cancelled + waitWithJobsCtx: nil, // Method context is nil, should fall back + } + + objs := getRuntimeObjFromManifests(t, []string{jobCompleteManifest}) + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) + require.NoError(t, err) + } + resourceList := getResourceListFromRuntimeObjs(t, c, objs) + + err := sw.WaitWithJobs(resourceList, time.Second*3) + // Should fail due to cancelled general context + require.Error(t, err) + assert.Contains(t, err.Error(), "context canceled") + }) + + t.Run("WaitForDelete falls back to general context when method context is nil", func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + ) + + // Create a cancelled general context + generalCtx, generalCancel := context.WithCancel(context.Background()) + generalCancel() // Cancel immediately + + sw := statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + ctx: generalCtx, // General context is cancelled + waitForDeleteCtx: nil, // Method context is nil, should fall back + } + + objs := getRuntimeObjFromManifests(t, []string{podCurrentManifest}) + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) + require.NoError(t, err) + } + resourceList := 
getResourceListFromRuntimeObjs(t, c, objs) + + err := sw.WaitForDelete(resourceList, time.Second*3) + // Should fail due to cancelled general context + require.Error(t, err) + assert.Contains(t, err.Error(), "context canceled") + }) +} + +func TestMethodContextOverridesGeneralContext(t *testing.T) { + t.Parallel() + + t.Run("method-specific context overrides general context for WatchUntilReady", func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + ) + + // General context is cancelled, but method context is not + generalCtx, generalCancel := context.WithCancel(context.Background()) + generalCancel() + + sw := statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + ctx: generalCtx, // Cancelled + watchUntilReadyCtx: context.Background(), // Not cancelled - should be used + } + + objs := getRuntimeObjFromManifests(t, []string{podCompleteManifest}) + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) + require.NoError(t, err) + } + resourceList := getResourceListFromRuntimeObjs(t, c, objs) + + err := sw.WatchUntilReady(resourceList, time.Second*3) + // Should succeed because method context is used and it's not cancelled + assert.NoError(t, err) + }) + + t.Run("method-specific context overrides general context for Wait", func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + ) + + // General context is cancelled, but method context is not + generalCtx, generalCancel := context.WithCancel(context.Background()) + generalCancel() + + sw := statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + ctx: generalCtx, // Cancelled + waitCtx: context.Background(), // Not cancelled - should be used + } + + objs := getRuntimeObjFromManifests(t, []string{podCurrentManifest}) + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) + require.NoError(t, err) + } + resourceList := getResourceListFromRuntimeObjs(t, c, objs) + + err := sw.Wait(resourceList, time.Second*3) + // Should succeed because method context is used and it's not cancelled + assert.NoError(t, err) + }) + + t.Run("method-specific context overrides general context for WaitWithJobs", func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + batchv1.SchemeGroupVersion.WithKind("Job"), + ) + + // General context is cancelled, but method context is not + generalCtx, generalCancel := context.WithCancel(context.Background()) + generalCancel() + + sw := statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + ctx: generalCtx, // Cancelled + waitWithJobsCtx: context.Background(), // Not cancelled - should be used + } + + objs := getRuntimeObjFromManifests(t, []string{jobCompleteManifest}) + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) + require.NoError(t, err) + } + resourceList := getResourceListFromRuntimeObjs(t, c, objs) + + err := sw.WaitWithJobs(resourceList, 
time.Second*3) + // Should succeed because method context is used and it's not cancelled + assert.NoError(t, err) + }) + + t.Run("method-specific context overrides general context for WaitForDelete", func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + timeout := time.Second + timeUntilPodDelete := time.Millisecond * 500 + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + ) + + // General context is cancelled, but method context is not + generalCtx, generalCancel := context.WithCancel(context.Background()) + generalCancel() + + sw := statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + ctx: generalCtx, // Cancelled + waitForDeleteCtx: context.Background(), // Not cancelled - should be used + } + + objs := getRuntimeObjFromManifests(t, []string{podCurrentManifest}) + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) + require.NoError(t, err) + } + + // Schedule deletion + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + go func(gvr schema.GroupVersionResource, u *unstructured.Unstructured) { + time.Sleep(timeUntilPodDelete) + err := fakeClient.Tracker().Delete(gvr, u.GetNamespace(), u.GetName()) + assert.NoError(t, err) + }(gvr, u) + } + + resourceList := getResourceListFromRuntimeObjs(t, c, objs) + err := sw.WaitForDelete(resourceList, timeout) + // Should succeed because method context is used and it's not cancelled + assert.NoError(t, err) + }) +} + func TestWatchUntilReadyWithCustomReaders(t *testing.T) { t.Parallel() tests := []struct { From 63b40a7a5e0d3f00ef2b4c1de9f50fb7d6df4ead Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Thu, 22 Jan 2026 09:12:53 -0500 Subject: [PATCH 8/9] use logger with waiter Signed-off-by: Austin Abro --- pkg/kube/client.go | 7 ++-- pkg/kube/statuswait.go | 20 ++++++----- pkg/kube/statuswait_test.go | 71 +++++++++++++++++++++---------------- 3 files changed, 56 insertions(+), 42 deletions(-) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index ad8f851d1..c4ea1e596 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -166,7 +166,8 @@ func (c *Client) newStatusWatcher(opts ...WaitOption) (*statusWaiter, error) { if waitContext == nil { waitContext = c.WaitContext } - return &statusWaiter{ + + sw := &statusWaiter{ restMapper: restMapper, client: dynamicClient, ctx: waitContext, @@ -175,7 +176,9 @@ func (c *Client) newStatusWatcher(opts ...WaitOption) (*statusWaiter, error) { waitWithJobsCtx: o.waitWithJobsCtx, waitForDeleteCtx: o.waitForDeleteCtx, readers: o.statusReaders, - }, nil + } + sw.SetLogger(c.Logger().Handler()) + return sw, nil } func (c *Client) GetWaiter(ws WaitStrategy) (Waiter, error) { diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index 84819492b..01024afa6 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -38,6 +38,7 @@ import ( "k8s.io/client-go/dynamic" watchtools "k8s.io/client-go/tools/watch" + "helm.sh/helm/v4/internal/logging" helmStatusReaders "helm.sh/helm/v4/internal/statusreaders" ) @@ -50,6 +51,7 @@ type statusWaiter struct { waitWithJobsCtx context.Context waitForDeleteCtx context.Context readers []engine.StatusReader + logging.LogHolder } // DefaultStatusWatcherTimeout is the timeout used by the status waiter when a @@ -72,7 +74,7 @@ func (w *statusWaiter) WatchUntilReady(resourceList ResourceList, 
timeout time.D } ctx, cancel := w.contextWithTimeout(w.watchUntilReadyCtx, timeout) defer cancel() - slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout) + w.Logger().Debug("waiting for resources", "count", len(resourceList), "timeout", timeout) sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) jobSR := helmStatusReaders.NewCustomJobStatusReader(w.restMapper) podSR := helmStatusReaders.NewCustomPodStatusReader(w.restMapper) @@ -94,7 +96,7 @@ func (w *statusWaiter) Wait(resourceList ResourceList, timeout time.Duration) er } ctx, cancel := w.contextWithTimeout(w.waitCtx, timeout) defer cancel() - slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout) + w.Logger().Debug("waiting for resources", "count", len(resourceList), "timeout", timeout) sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) sw.StatusReader = statusreaders.NewStatusReader(w.restMapper, w.readers...) return w.wait(ctx, resourceList, sw) @@ -106,7 +108,7 @@ func (w *statusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Dura } ctx, cancel := w.contextWithTimeout(w.waitWithJobsCtx, timeout) defer cancel() - slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout) + w.Logger().Debug("waiting for resources", "count", len(resourceList), "timeout", timeout) sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) newCustomJobStatusReader := helmStatusReaders.NewCustomJobStatusReader(w.restMapper) readers := append([]engine.StatusReader(nil), w.readers...) @@ -122,7 +124,7 @@ func (w *statusWaiter) WaitForDelete(resourceList ResourceList, timeout time.Dur } ctx, cancel := w.contextWithTimeout(w.waitForDeleteCtx, timeout) defer cancel() - slog.Debug("waiting for resources to be deleted", "count", len(resourceList), "timeout", timeout) + w.Logger().Debug("waiting for resources to be deleted", "count", len(resourceList), "timeout", timeout) sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) return w.waitForDelete(ctx, resourceList, sw) } @@ -142,7 +144,7 @@ func (w *statusWaiter) waitForDelete(ctx context.Context, resourceList ResourceL RESTScopeStrategy: watcher.RESTScopeNamespace, }) statusCollector := collector.NewResourceStatusCollector(resources) - done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.NotFoundStatus)) + done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.NotFoundStatus, w.Logger())) <-done if statusCollector.Error != nil { @@ -189,7 +191,7 @@ func (w *statusWaiter) wait(ctx context.Context, resourceList ResourceList, sw w RESTScopeStrategy: watcher.RESTScopeNamespace, }) statusCollector := collector.NewResourceStatusCollector(resources) - done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.CurrentStatus)) + done := statusCollector.ListenWithObserver(eventCh, statusObserver(cancel, status.CurrentStatus, w.Logger())) <-done if statusCollector.Error != nil { @@ -228,7 +230,7 @@ func contextWithTimeout(ctx context.Context, timeout time.Duration) (context.Con return watchtools.ContextWithOptionalTimeout(ctx, timeout) } -func statusObserver(cancel context.CancelFunc, desired status.Status) collector.ObserverFunc { +func statusObserver(cancel context.CancelFunc, desired status.Status, logger *slog.Logger) collector.ObserverFunc { return func(statusCollector *collector.ResourceStatusCollector, _ event.Event) { var rss []*event.ResourceStatus var nonDesiredResources []*event.ResourceStatus @@ -253,7 
+255,7 @@ func statusObserver(cancel context.CancelFunc, desired status.Status) collector. } if aggregator.AggregateStatus(rss, desired) == desired { - slog.Debug("all resources achieved desired status", "desiredStatus", desired, "resourceCount", len(rss)) + logger.Debug("all resources achieved desired status", "desiredStatus", desired, "resourceCount", len(rss)) cancel() return } @@ -264,7 +266,7 @@ func statusObserver(cancel context.CancelFunc, desired status.Status) collector. return nonDesiredResources[i].Identifier.Name < nonDesiredResources[j].Identifier.Name }) first := nonDesiredResources[0] - slog.Debug("waiting for resource", "namespace", first.Identifier.Namespace, "name", first.Identifier.Name, "kind", first.Identifier.GroupKind.Kind, "expectedStatus", desired, "actualStatus", first.Status) + logger.Debug("waiting for resource", "namespace", first.Identifier.Namespace, "name", first.Identifier.Name, "kind", first.Identifier.GroupKind.Kind, "expectedStatus", desired, "actualStatus", first.Status) } } } diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index 83aaa357a..d2dd57872 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -20,6 +20,7 @@ import ( "context" "errors" "fmt" + "log/slog" "strings" "sync/atomic" "testing" @@ -326,6 +327,7 @@ func TestStatusWaitForDelete(t *testing.T) { restMapper: fakeMapper, client: fakeClient, } + statusWaiter.SetLogger(slog.Default().Handler()) objsToCreate := getRuntimeObjFromManifests(t, tt.manifestsToCreate) for _, objToCreate := range objsToCreate { u := objToCreate.(*unstructured.Unstructured) @@ -369,6 +371,7 @@ func TestStatusWaitForDeleteNonExistentObject(t *testing.T) { restMapper: fakeMapper, client: fakeClient, } + statusWaiter.SetLogger(slog.Default().Handler()) // Don't create the object to test that the wait for delete works when the object doesn't exist objManifest := getRuntimeObjFromManifests(t, []string{podCurrentManifest}) resourceList := getResourceListFromRuntimeObjs(t, c, objManifest) @@ -425,6 +428,7 @@ func TestStatusWait(t *testing.T) { client: fakeClient, restMapper: fakeMapper, } + statusWaiter.SetLogger(slog.Default().Handler()) objs := getRuntimeObjFromManifests(t, tt.objManifests) for _, obj := range objs { u := obj.(*unstructured.Unstructured) @@ -481,6 +485,7 @@ func TestWaitForJobComplete(t *testing.T) { client: fakeClient, restMapper: fakeMapper, } + statusWaiter.SetLogger(slog.Default().Handler()) objs := getRuntimeObjFromManifests(t, tt.objManifests) for _, obj := range objs { u := obj.(*unstructured.Unstructured) @@ -543,6 +548,7 @@ func TestWatchForReady(t *testing.T) { client: fakeClient, restMapper: fakeMapper, } + statusWaiter.SetLogger(slog.Default().Handler()) objs := getRuntimeObjFromManifests(t, tt.objManifests) for _, obj := range objs { u := obj.(*unstructured.Unstructured) @@ -570,19 +576,19 @@ func TestStatusWaitMultipleNamespaces(t *testing.T) { name string objManifests []string expectErrStrs []string - testFunc func(statusWaiter, ResourceList, time.Duration) error + testFunc func(*statusWaiter, ResourceList, time.Duration) error }{ { name: "pods in multiple namespaces", objManifests: []string{podNamespace1Manifest, podNamespace2Manifest}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.Wait(rl, timeout) }, }, { name: "hooks in multiple namespaces", objManifests: []string{jobNamespace1CompleteManifest, 
podNamespace2SucceededManifest}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.WatchUntilReady(rl, timeout) }, }, @@ -590,42 +596,42 @@ func TestStatusWaitMultipleNamespaces(t *testing.T) { name: "error when resource not ready in one namespace", objManifests: []string{podNamespace1NoStatusManifest, podNamespace2Manifest}, expectErrStrs: []string{"resource Pod/namespace-1/pod-ns1 not ready. status: InProgress", "context deadline exceeded"}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.Wait(rl, timeout) }, }, { name: "delete resources in multiple namespaces", objManifests: []string{podNamespace1Manifest, podNamespace2Manifest}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.WaitForDelete(rl, timeout) }, }, { name: "cluster-scoped resources work correctly with unrestricted permissions", objManifests: []string{podNamespace1Manifest, clusterRoleManifest}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.Wait(rl, timeout) }, }, { name: "namespace-scoped and cluster-scoped resources work together", objManifests: []string{podNamespace1Manifest, podNamespace2Manifest, clusterRoleManifest}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.Wait(rl, timeout) }, }, { name: "delete cluster-scoped resources works correctly", objManifests: []string{podNamespace1Manifest, namespaceManifest}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.WaitForDelete(rl, timeout) }, }, { name: "watch cluster-scoped resources works correctly", objManifests: []string{clusterRoleManifest}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.WatchUntilReady(rl, timeout) }, }, @@ -646,6 +652,7 @@ func TestStatusWaitMultipleNamespaces(t *testing.T) { client: fakeClient, restMapper: fakeMapper, } + sw.SetLogger(slog.Default().Handler()) objs := getRuntimeObjFromManifests(t, tt.objManifests) for _, obj := range objs { u := obj.(*unstructured.Unstructured) @@ -668,7 +675,7 @@ func TestStatusWaitMultipleNamespaces(t *testing.T) { } resourceList := getResourceListFromRuntimeObjs(t, c, objs) - err := tt.testFunc(sw, resourceList, time.Second*3) + err := tt.testFunc(&sw, resourceList, time.Second*3) if tt.expectErrStrs != nil { require.Error(t, err) for _, expectedErrStr := range tt.expectErrStrs { @@ -756,13 +763,13 @@ func TestStatusWaitRestrictedRBAC(t *testing.T) { objManifests []string allowedNamespaces []string expectErrs []error - testFunc func(statusWaiter, ResourceList, time.Duration) error + testFunc func(*statusWaiter, ResourceList, time.Duration) error }{ { name: "pods in multiple namespaces with namespace permissions", objManifests: []string{podNamespace1Manifest, podNamespace2Manifest}, allowedNamespaces: []string{"namespace-1", "namespace-2"}, - 
testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.Wait(rl, timeout) }, }, @@ -770,7 +777,7 @@ func TestStatusWaitRestrictedRBAC(t *testing.T) { name: "delete pods in multiple namespaces with namespace permissions", objManifests: []string{podNamespace1Manifest, podNamespace2Manifest}, allowedNamespaces: []string{"namespace-1", "namespace-2"}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.WaitForDelete(rl, timeout) }, }, @@ -778,7 +785,7 @@ func TestStatusWaitRestrictedRBAC(t *testing.T) { name: "hooks in multiple namespaces with namespace permissions", objManifests: []string{jobNamespace1CompleteManifest, podNamespace2SucceededManifest}, allowedNamespaces: []string{"namespace-1", "namespace-2"}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.WatchUntilReady(rl, timeout) }, }, @@ -787,7 +794,7 @@ func TestStatusWaitRestrictedRBAC(t *testing.T) { objManifests: []string{podNamespace1Manifest, clusterRoleManifest}, allowedNamespaces: []string{"namespace-1"}, expectErrs: []error{fmt.Errorf("user does not have cluster-wide LIST permissions for cluster-scoped resources")}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.Wait(rl, timeout) }, }, @@ -796,7 +803,7 @@ func TestStatusWaitRestrictedRBAC(t *testing.T) { objManifests: []string{podNamespace1Manifest, namespaceManifest}, allowedNamespaces: []string{"namespace-1"}, expectErrs: []error{fmt.Errorf("user does not have cluster-wide LIST permissions for cluster-scoped resources")}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.WaitForDelete(rl, timeout) }, }, @@ -805,7 +812,7 @@ func TestStatusWaitRestrictedRBAC(t *testing.T) { objManifests: []string{podNamespace1Manifest, podNamespace2Manifest}, allowedNamespaces: []string{"namespace-1"}, expectErrs: []error{fmt.Errorf("user does not have LIST permissions in namespace %q", "namespace-2")}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.Wait(rl, timeout) }, }, @@ -827,6 +834,7 @@ func TestStatusWaitRestrictedRBAC(t *testing.T) { client: baseFakeClient, restMapper: fakeMapper, } + sw.SetLogger(slog.Default().Handler()) objs := getRuntimeObjFromManifests(t, tt.objManifests) for _, obj := range objs { u := obj.(*unstructured.Unstructured) @@ -849,7 +857,7 @@ func TestStatusWaitRestrictedRBAC(t *testing.T) { } resourceList := getResourceListFromRuntimeObjs(t, c, objs) - err := tt.testFunc(sw, resourceList, time.Second*3) + err := tt.testFunc(&sw, resourceList, time.Second*3) if tt.expectErrs != nil { require.Error(t, err) for _, expectedErr := range tt.expectErrs { @@ -870,13 +878,13 @@ func TestStatusWaitMixedResources(t *testing.T) { objManifests []string allowedNamespaces []string expectErrs []error - testFunc func(statusWaiter, ResourceList, time.Duration) error + testFunc func(*statusWaiter, ResourceList, time.Duration) error }{ { name: "wait 
succeeds with namespace-scoped resources only", objManifests: []string{podNamespace1Manifest, podNamespace2Manifest}, allowedNamespaces: []string{"namespace-1", "namespace-2"}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.Wait(rl, timeout) }, }, @@ -885,7 +893,7 @@ func TestStatusWaitMixedResources(t *testing.T) { objManifests: []string{podNamespace1Manifest, clusterRoleManifest}, allowedNamespaces: []string{"namespace-1"}, expectErrs: []error{fmt.Errorf("user does not have cluster-wide LIST permissions for cluster-scoped resources")}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.Wait(rl, timeout) }, }, @@ -894,7 +902,7 @@ func TestStatusWaitMixedResources(t *testing.T) { objManifests: []string{podNamespace1Manifest, clusterRoleManifest}, allowedNamespaces: []string{"namespace-1"}, expectErrs: []error{fmt.Errorf("user does not have cluster-wide LIST permissions for cluster-scoped resources")}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.WaitForDelete(rl, timeout) }, }, @@ -903,7 +911,7 @@ func TestStatusWaitMixedResources(t *testing.T) { objManifests: []string{podNamespace1Manifest, namespaceManifest}, allowedNamespaces: []string{"namespace-1"}, expectErrs: []error{fmt.Errorf("user does not have cluster-wide LIST permissions for cluster-scoped resources")}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.Wait(rl, timeout) }, }, @@ -912,7 +920,7 @@ func TestStatusWaitMixedResources(t *testing.T) { objManifests: []string{podNamespace1Manifest, podNamespace2Manifest}, allowedNamespaces: []string{"namespace-1"}, expectErrs: []error{fmt.Errorf("user does not have LIST permissions in namespace %q", "namespace-2")}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.Wait(rl, timeout) }, }, @@ -934,6 +942,7 @@ func TestStatusWaitMixedResources(t *testing.T) { client: baseFakeClient, restMapper: fakeMapper, } + sw.SetLogger(slog.Default().Handler()) objs := getRuntimeObjFromManifests(t, tt.objManifests) for _, obj := range objs { u := obj.(*unstructured.Unstructured) @@ -956,7 +965,7 @@ func TestStatusWaitMixedResources(t *testing.T) { } resourceList := getResourceListFromRuntimeObjs(t, c, objs) - err := tt.testFunc(sw, resourceList, time.Second*3) + err := tt.testFunc(&sw, resourceList, time.Second*3) if tt.expectErrs != nil { require.Error(t, err) for _, expectedErr := range tt.expectErrs { @@ -1151,7 +1160,7 @@ func TestStatusWaitWithFailedResources(t *testing.T) { objManifests []string customReader *mockStatusReader expectErrStrs []string - testFunc func(statusWaiter, ResourceList, time.Duration) error + testFunc func(*statusWaiter, ResourceList, time.Duration) error }{ { name: "Wait returns error when resource has failed", @@ -1161,7 +1170,7 @@ func TestStatusWaitWithFailedResources(t *testing.T) { status: status.FailedStatus, }, expectErrStrs: []string{"resource Pod/ns/in-progress-pod not ready. 
status: Failed, message: mock status reader"}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.Wait(rl, timeout) }, }, @@ -1172,7 +1181,7 @@ func TestStatusWaitWithFailedResources(t *testing.T) { expectErrStrs: []string{ "resource Job/default/failed-job not ready. status: Failed", }, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.WaitWithJobs(rl, timeout) }, }, @@ -1188,7 +1197,7 @@ func TestStatusWaitWithFailedResources(t *testing.T) { "resource Pod/ns/in-progress-pod not ready. status: Failed, message: mock status reader", "resource Pod/ns/current-pod not ready. status: Failed, message: mock status reader", }, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.Wait(rl, timeout) }, }, @@ -1201,7 +1210,7 @@ func TestStatusWaitWithFailedResources(t *testing.T) { }, // WatchUntilReady also waits for CurrentStatus, so failed resources should return error expectErrStrs: []string{"resource Pod/ns/in-progress-pod not ready. status: Failed, message: mock status reader"}, - testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error { + testFunc: func(sw *statusWaiter, rl ResourceList, timeout time.Duration) error { return sw.WatchUntilReady(rl, timeout) }, }, @@ -1233,7 +1242,7 @@ func TestStatusWaitWithFailedResources(t *testing.T) { assert.NoError(t, err) } resourceList := getResourceListFromRuntimeObjs(t, c, objs) - err := tt.testFunc(sw, resourceList, time.Second*3) + err := tt.testFunc(&sw, resourceList, time.Second*3) if tt.expectErrStrs != nil { require.Error(t, err) for _, expectedErrStr := range tt.expectErrStrs { From ec0726523e52448eb05c8b5b3faae969e3a79266 Mon Sep 17 00:00:00 2001 From: Austin Abro Date: Thu, 22 Jan 2026 15:45:29 -0500 Subject: [PATCH 9/9] whitespace Signed-off-by: Austin Abro --- pkg/kube/client.go | 1 - 1 file changed, 1 deletion(-) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index c4ea1e596..fc706496d 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -166,7 +166,6 @@ func (c *Client) newStatusWatcher(opts ...WaitOption) (*statusWaiter, error) { if waitContext == nil { waitContext = c.WaitContext } - sw := &statusWaiter{ restMapper: restMapper, client: dynamicClient,
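
Reviewer note: the tests above (patch 7/9) rely on the waiter preferring a method-specific context over the general one. The sketch below illustrates that selection logic only; it is an assumption inferred from the test expectations, not the verbatim Helm implementation (which also routes through watchtools.ContextWithOptionalTimeout rather than plain context.WithTimeout), and the `waiter` type and field names here are illustrative.

```go
// Sketch only: assumed fallback order methodCtx -> general ctx -> Background.
package main

import (
	"context"
	"fmt"
	"time"
)

// waiter holds a general context plus an optional per-method override.
type waiter struct {
	ctx     context.Context // general context, may be nil
	waitCtx context.Context // method-specific override for Wait, may be nil
}

// contextWithTimeout prefers the method-specific context, falls back to the
// general one, then to context.Background(), and finally applies the timeout.
func (w *waiter) contextWithTimeout(methodCtx context.Context, timeout time.Duration) (context.Context, context.CancelFunc) {
	base := methodCtx
	if base == nil {
		base = w.ctx
	}
	if base == nil {
		base = context.Background()
	}
	return context.WithTimeout(base, timeout)
}

func main() {
	cancelled, cancel := context.WithCancel(context.Background())
	cancel() // the general context is already cancelled, as in the tests

	w := &waiter{ctx: cancelled, waitCtx: context.Background()}

	// Because waitCtx is set, the cancelled general context is ignored and
	// the wait proceeds under the timeout only.
	ctx, stop := w.contextWithTimeout(w.waitCtx, 3*time.Second)
	defer stop()
	fmt.Println("context error:", ctx.Err()) // <nil>
}
```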
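
Reviewer note: patch 8/9 embeds a log holder in statusWaiter and replaces slog.Debug calls with w.Logger().Debug so the waiter logs through the client's handler (sw.SetLogger(c.Logger().Handler())). The sketch below shows the general shape of that pattern with a minimal stand-in holder; the real type lives in helm.sh/helm/v4/internal/logging and its internals are assumed here.

```go
// Sketch of the embedded log-holder pattern, assuming a minimal holder with
// the SetLogger/Logger methods seen in the diff.
package main

import (
	"log/slog"
	"os"
)

// LogHolder stores a logger and returns slog.Default() until one is set,
// so embedding types can always call Logger() safely.
type LogHolder struct {
	logger *slog.Logger
}

func (h *LogHolder) SetLogger(handler slog.Handler) {
	h.logger = slog.New(handler)
}

func (h *LogHolder) Logger() *slog.Logger {
	if h.logger == nil {
		return slog.Default()
	}
	return h.logger
}

// waiter plays the role of statusWaiter: embedding LogHolder gives it
// SetLogger and Logger without any extra wiring.
type waiter struct {
	LogHolder
}

func (w *waiter) Wait(count int) {
	w.Logger().Debug("waiting for resources", "count", count)
}

func main() {
	w := &waiter{}
	// Mirror sw.SetLogger(c.Logger().Handler()) from the patch by handing the
	// waiter the caller's handler.
	w.SetLogger(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug}))
	w.Wait(3)
}
```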