From 7a557580e1b5aac8327331e720e74bbbe49c0ad7 Mon Sep 17 00:00:00 2001 From: Michelle Fernandez Bieber Date: Sat, 22 Jan 2022 23:20:18 +0100 Subject: [PATCH 01/18] added shutdown hook that is executed after the logs have been retrieved Signed-off-by: Michelle Fernandez Bieber --- pkg/action/hooks.go | 75 ++++++++++++++++++++++------------- pkg/action/release_testing.go | 20 +++++----- pkg/cmd/release_testing.go | 3 +- 3 files changed, 61 insertions(+), 37 deletions(-) diff --git a/pkg/action/hooks.go b/pkg/action/hooks.go index 1e4fec9bd..c86964c65 100644 --- a/pkg/action/hooks.go +++ b/pkg/action/hooks.go @@ -33,6 +33,24 @@ import ( // execHook executes all of the hooks for the given hook event. func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, waitStrategy kube.WaitStrategy, timeout time.Duration, serverSideApply bool) error { + shutdown, err := cfg.execHookWithDelayedShutdown(rl, hook, waitStrategy, timeout, serverSideApply) + if err != nil { + if err := shutdown(); err != nil { + return err + } + return err + } + return shutdown() +} + +type ExecuteShutdownHooks = func() error + +func ShutdownNoOp() error { + return nil +} + +// execHook executes all of the hooks for the given hook event and return a shutdownHook function to trigger deletions after doing other things like e.g. retrieving logs. +func (cfg *Configuration) execHookWithDelayedShutdown(rl *release.Release, hook release.HookEvent, waitStrategy kube.WaitStrategy, timeout time.Duration, serverSideApply bool) (ExecuteShutdownHooks, error) { executingHooks := []*release.Hook{} for _, h := range rl.Hooks { @@ -51,12 +69,12 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, cfg.hookSetDeletePolicy(h) if err := cfg.deleteHookByPolicy(h, release.HookBeforeHookCreation, waitStrategy, timeout); err != nil { - return err + return ShutdownNoOp, err } resources, err := cfg.KubeClient.Build(bytes.NewBufferString(h.Manifest), true) if err != nil { - return fmt.Errorf("unable to build kubernetes object for %s hook %s: %w", hook, h.Path, err) + return ShutdownNoOp, fmt.Errorf("unable to build kubernetes object for %s hook %s: %w", hook, h.Path, err) } // Record the time at which the hook was applied to the cluster @@ -77,12 +95,12 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, kube.ClientCreateOptionServerSideApply(serverSideApply, false)); err != nil { h.LastRun.CompletedAt = time.Now() h.LastRun.Phase = release.HookPhaseFailed - return fmt.Errorf("warning: Hook %s %s failed: %w", hook, h.Path, err) + return ShutdownNoOp, fmt.Errorf("warning: Hook %s %s failed: %w", hook, h.Path, err) } waiter, err := cfg.KubeClient.GetWaiter(waitStrategy) if err != nil { - return fmt.Errorf("unable to get waiter: %w", err) + return ShutdownNoOp, fmt.Errorf("unable to get waiter: %w", err) } // Watch hook resources until they have completed err = waiter.WatchUntilReady(resources, timeout) @@ -98,36 +116,39 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, } // If a hook is failed, check the annotation of the hook to determine whether the hook should be deleted // under failed condition. If so, then clear the corresponding resource object in the hook - if errDeleting := cfg.deleteHookByPolicy(h, release.HookFailed, waitStrategy, timeout); errDeleting != nil { - // We log the error here as we want to propagate the hook failure upwards to the release object. 
- log.Printf("error deleting the hook resource on hook failure: %v", errDeleting) - } - - // If a hook is failed, check the annotation of the previous successful hooks to determine whether the hooks - // should be deleted under succeeded condition. - if err := cfg.deleteHooksByPolicy(executingHooks[0:i], release.HookSucceeded, waitStrategy, timeout); err != nil { + return func() error { + if errDeleting := cfg.deleteHookByPolicy(h, release.HookFailed, waitStrategy, timeout); errDeleting != nil { + // We log the error here as we want to propagate the hook failure upwards to the release object. + log.Printf("error deleting the hook resource on hook failure: %v", errDeleting) + } + + // If a hook is failed, check the annotation of the previous successful hooks to determine whether the hooks + // should be deleted under succeeded condition. + if err := cfg.deleteHooksByPolicy(executingHooks[0:i], release.HookSucceeded, waitStrategy, timeout); err != nil { + return err + } return err - } + }, err - return err } h.LastRun.Phase = release.HookPhaseSucceeded } - // If all hooks are successful, check the annotation of each hook to determine whether the hook should be deleted - // or output should be logged under succeeded condition. If so, then clear the corresponding resource object in each hook - for i := len(executingHooks) - 1; i >= 0; i-- { - h := executingHooks[i] - if err := cfg.outputLogsByPolicy(h, rl.Namespace, release.HookOutputOnSucceeded); err != nil { - // We log here as we still want to attempt hook resource deletion even if output logging fails. - log.Printf("error outputting logs for hook failure: %v", err) - } - if err := cfg.deleteHookByPolicy(h, release.HookSucceeded, waitStrategy, timeout); err != nil { - return err + return func() error { + // If all hooks are successful, check the annotation of each hook to determine whether the hook should be deleted + // or output should be logged under succeeded condition. If so, then clear the corresponding resource object in each hook + for i := len(executingHooks) - 1; i >= 0; i-- { + h := executingHooks[i] + if err := cfg.outputLogsByPolicy(h, rl.Namespace, release.HookOutputOnSucceeded); err != nil { + // We log here as we still want to attempt hook resource deletion even if output logging fails. + log.Printf("error outputting logs for hook failure: %v", err) + } + if err := cfg.deleteHookByPolicy(h, release.HookSucceeded, waitStrategy, timeout); err != nil { + return err + } } - } - - return nil + return nil + }, nil } // hookByWeight is a sorter for hooks diff --git a/pkg/action/release_testing.go b/pkg/action/release_testing.go index b649579f4..2aa063515 100644 --- a/pkg/action/release_testing.go +++ b/pkg/action/release_testing.go @@ -57,24 +57,24 @@ func NewReleaseTesting(cfg *Configuration) *ReleaseTesting { } // Run executes 'helm test' against the given release. 
-func (r *ReleaseTesting) Run(name string) (ri.Releaser, error) { +func (r *ReleaseTesting) Run(name string) (ri.Releaser, ExecuteShutdownHooks, error) { if err := r.cfg.KubeClient.IsReachable(); err != nil { - return nil, err + return nil, ShutdownNoOp, err } if err := chartutil.ValidateReleaseName(name); err != nil { - return nil, fmt.Errorf("releaseTest: Release name is invalid: %s", name) + return nil, ShutdownNoOp, fmt.Errorf("releaseTest: Release name is invalid: %s", name) } // finds the non-deleted release with the given name reli, err := r.cfg.Releases.Last(name) if err != nil { - return reli, err + return reli, ShutdownNoOp, err } rel, err := releaserToV1Release(reli) if err != nil { - return rel, err + return reli, ShutdownNoOp, err } skippedHooks := []*release.Hook{} @@ -102,14 +102,16 @@ func (r *ReleaseTesting) Run(name string) (ri.Releaser, error) { } serverSideApply := rel.ApplyMethod == string(release.ApplyMethodServerSideApply) - if err := r.cfg.execHook(rel, release.HookTest, kube.StatusWatcherStrategy, r.Timeout, serverSideApply); err != nil { + shutdown, err := r.cfg.execHookWithDelayedShutdown(rel, release.HookTest, kube.StatusWatcherStrategy, r.Timeout, serverSideApply) + + if err != nil { rel.Hooks = append(skippedHooks, rel.Hooks...) - r.cfg.Releases.Update(rel) - return rel, err + r.cfg.Releases.Update(reli) + return reli, shutdown, err } rel.Hooks = append(skippedHooks, rel.Hooks...) - return rel, r.cfg.Releases.Update(rel) + return reli, shutdown, r.cfg.Releases.Update(reli) } // GetPodLogs will write the logs for all test pods in the given release into diff --git a/pkg/cmd/release_testing.go b/pkg/cmd/release_testing.go index 88a6f351f..3d6921e71 100644 --- a/pkg/cmd/release_testing.go +++ b/pkg/cmd/release_testing.go @@ -65,7 +65,8 @@ func newReleaseTestCmd(cfg *action.Configuration, out io.Writer) *cobra.Command client.Filters[action.ExcludeNameFilter] = append(client.Filters[action.ExcludeNameFilter], notName.ReplaceAllLiteralString(f, "")) } } - reli, runErr := client.Run(args[0]) + reli, shutdown, runErr := client.Run(args[0]) + defer shutdown() // We only return an error if we weren't even able to get the // release, otherwise we keep going so we can print status and logs // if requested From 10714772bedf15c207bccf85be21f2e244d4a4ff Mon Sep 17 00:00:00 2001 From: Michelle Fernandez Bieber Date: Sun, 23 Jan 2022 00:17:09 +0100 Subject: [PATCH 02/18] updated comment and made defer of shutdown function return errors as before and not the possible shutdown error Signed-off-by: Michelle Fernandez Bieber --- pkg/action/hooks.go | 2 +- pkg/cmd/release_testing.go | 13 +++++++++++-- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/pkg/action/hooks.go b/pkg/action/hooks.go index c86964c65..3a0aabb61 100644 --- a/pkg/action/hooks.go +++ b/pkg/action/hooks.go @@ -49,7 +49,7 @@ func ShutdownNoOp() error { return nil } -// execHook executes all of the hooks for the given hook event and return a shutdownHook function to trigger deletions after doing other things like e.g. retrieving logs. +// execHook executes all of the hooks for the given hook event and returns a shutdownHook function to trigger deletions after doing other things like e.g. retrieving logs. 
func (cfg *Configuration) execHookWithDelayedShutdown(rl *release.Release, hook release.HookEvent, waitStrategy kube.WaitStrategy, timeout time.Duration, serverSideApply bool) (ExecuteShutdownHooks, error) { executingHooks := []*release.Hook{} diff --git a/pkg/cmd/release_testing.go b/pkg/cmd/release_testing.go index 3d6921e71..30de160ed 100644 --- a/pkg/cmd/release_testing.go +++ b/pkg/cmd/release_testing.go @@ -55,7 +55,7 @@ func newReleaseTestCmd(cfg *action.Configuration, out io.Writer) *cobra.Command } return compListReleases(toComplete, args, cfg) }, - RunE: func(_ *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, args []string) (returnError error) { client.Namespace = settings.Namespace() notName := regexp.MustCompile(`^!\s?name=`) for _, f := range filter { @@ -65,8 +65,17 @@ func newReleaseTestCmd(cfg *action.Configuration, out io.Writer) *cobra.Command client.Filters[action.ExcludeNameFilter] = append(client.Filters[action.ExcludeNameFilter], notName.ReplaceAllLiteralString(f, "")) } } + reli, shutdown, runErr := client.Run(args[0]) - defer shutdown() + defer func() { + if shutdownErr := shutdown(); shutdownErr != nil { + if returnError == nil { + returnError = shutdownErr + } + } + }() + + // We only return an error if we weren't even able to get the // release, otherwise we keep going so we can print status and logs // if requested From 7a61ebf01370d67684681c0d210ddec90fda3503 Mon Sep 17 00:00:00 2001 From: Michelle Fernandez Bieber Date: Sun, 23 Jan 2022 00:25:32 +0100 Subject: [PATCH 03/18] cleaned up empty line Signed-off-by: Michelle Fernandez Bieber --- pkg/action/hooks.go | 1 - 1 file changed, 1 deletion(-) diff --git a/pkg/action/hooks.go b/pkg/action/hooks.go index 3a0aabb61..bd8e96e74 100644 --- a/pkg/action/hooks.go +++ b/pkg/action/hooks.go @@ -129,7 +129,6 @@ func (cfg *Configuration) execHookWithDelayedShutdown(rl *release.Release, hook } return err }, err - } h.LastRun.Phase = release.HookPhaseSucceeded } From d9301441f47bb8cf4c84ffcbed22f39ba70b1588 Mon Sep 17 00:00:00 2001 From: Michelle Fernandez Bieber Date: Fri, 22 Apr 2022 22:55:35 +0200 Subject: [PATCH 04/18] added check for nil shutdown Signed-off-by: Michelle Fernandez Bieber --- pkg/action/hooks.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/action/hooks.go b/pkg/action/hooks.go index bd8e96e74..adf7478f6 100644 --- a/pkg/action/hooks.go +++ b/pkg/action/hooks.go @@ -34,6 +34,9 @@ import ( // execHook executes all of the hooks for the given hook event. 
func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, waitStrategy kube.WaitStrategy, timeout time.Duration, serverSideApply bool) error { shutdown, err := cfg.execHookWithDelayedShutdown(rl, hook, waitStrategy, timeout, serverSideApply) + if shutdown == nil { + return nil + } if err != nil { if err := shutdown(); err != nil { return err From 6bb5bcc212fd014ad521eab8b1ffd94f3f4f0f71 Mon Sep 17 00:00:00 2001 From: Michelle Fernandez Bieber <37021266+mfbieber@users.noreply.github.com> Date: Sun, 23 Jul 2023 08:17:57 +0200 Subject: [PATCH 05/18] Update pkg/action/hooks.go Co-authored-by: Marco Lecheler Signed-off-by: Michelle Fernandez Bieber <37021266+mfbieber@users.noreply.github.com> --- pkg/action/hooks.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/action/hooks.go b/pkg/action/hooks.go index adf7478f6..c336e10fe 100644 --- a/pkg/action/hooks.go +++ b/pkg/action/hooks.go @@ -52,7 +52,7 @@ func ShutdownNoOp() error { return nil } -// execHook executes all of the hooks for the given hook event and returns a shutdownHook function to trigger deletions after doing other things like e.g. retrieving logs. +// execHookWithDelayedShutdown executes all of the hooks for the given hook event and returns a shutdownHook function to trigger deletions after doing other things like e.g. retrieving logs. func (cfg *Configuration) execHookWithDelayedShutdown(rl *release.Release, hook release.HookEvent, waitStrategy kube.WaitStrategy, timeout time.Duration, serverSideApply bool) (ExecuteShutdownHooks, error) { executingHooks := []*release.Hook{} From 9f1c8a26f00ebbe2942064a06b49c275162d10ef Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Wed, 26 Nov 2025 17:24:06 +0100 Subject: [PATCH 06/18] Fix linting issue Signed-off-by: Benoit Tigeot --- pkg/cmd/release_testing.go | 1 - 1 file changed, 1 deletion(-) diff --git a/pkg/cmd/release_testing.go b/pkg/cmd/release_testing.go index 30de160ed..5a6159e7d 100644 --- a/pkg/cmd/release_testing.go +++ b/pkg/cmd/release_testing.go @@ -75,7 +75,6 @@ func newReleaseTestCmd(cfg *action.Configuration, out io.Writer) *cobra.Command } }() - // We only return an error if we weren't even able to get the // release, otherwise we keep going so we can print status and logs // if requested From 70fc5f97e2b42a2cf87ff8756d6baf4ab2d5a920 Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Wed, 26 Nov 2025 23:29:57 +0100 Subject: [PATCH 07/18] Code review Signed-off-by: Benoit Tigeot --- pkg/action/hooks.go | 16 ++++++++-------- pkg/action/release_testing.go | 10 +++++----- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/pkg/action/hooks.go b/pkg/action/hooks.go index c336e10fe..2ea851a3e 100644 --- a/pkg/action/hooks.go +++ b/pkg/action/hooks.go @@ -35,7 +35,7 @@ import ( func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, waitStrategy kube.WaitStrategy, timeout time.Duration, serverSideApply bool) error { shutdown, err := cfg.execHookWithDelayedShutdown(rl, hook, waitStrategy, timeout, serverSideApply) if shutdown == nil { - return nil + return err } if err != nil { if err := shutdown(); err != nil { @@ -46,14 +46,14 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, return shutdown() } -type ExecuteShutdownHooks = func() error +type executeShutdownFunc = func() error -func ShutdownNoOp() error { +func shutdownNoOp() error { return nil } // execHookWithDelayedShutdown executes all of the hooks for the given hook event and returns a shutdownHook function to 
trigger deletions after doing other things like e.g. retrieving logs. -func (cfg *Configuration) execHookWithDelayedShutdown(rl *release.Release, hook release.HookEvent, waitStrategy kube.WaitStrategy, timeout time.Duration, serverSideApply bool) (ExecuteShutdownHooks, error) { +func (cfg *Configuration) execHookWithDelayedShutdown(rl *release.Release, hook release.HookEvent, waitStrategy kube.WaitStrategy, timeout time.Duration, serverSideApply bool) (executeShutdownFunc, error) { executingHooks := []*release.Hook{} for _, h := range rl.Hooks { @@ -72,12 +72,12 @@ func (cfg *Configuration) execHookWithDelayedShutdown(rl *release.Release, hook cfg.hookSetDeletePolicy(h) if err := cfg.deleteHookByPolicy(h, release.HookBeforeHookCreation, waitStrategy, timeout); err != nil { - return ShutdownNoOp, err + return shutdownNoOp, err } resources, err := cfg.KubeClient.Build(bytes.NewBufferString(h.Manifest), true) if err != nil { - return ShutdownNoOp, fmt.Errorf("unable to build kubernetes object for %s hook %s: %w", hook, h.Path, err) + return shutdownNoOp, fmt.Errorf("unable to build kubernetes object for %s hook %s: %w", hook, h.Path, err) } // Record the time at which the hook was applied to the cluster @@ -98,12 +98,12 @@ func (cfg *Configuration) execHookWithDelayedShutdown(rl *release.Release, hook kube.ClientCreateOptionServerSideApply(serverSideApply, false)); err != nil { h.LastRun.CompletedAt = time.Now() h.LastRun.Phase = release.HookPhaseFailed - return ShutdownNoOp, fmt.Errorf("warning: Hook %s %s failed: %w", hook, h.Path, err) + return shutdownNoOp, fmt.Errorf("warning: Hook %s %s failed: %w", hook, h.Path, err) } waiter, err := cfg.KubeClient.GetWaiter(waitStrategy) if err != nil { - return ShutdownNoOp, fmt.Errorf("unable to get waiter: %w", err) + return shutdownNoOp, fmt.Errorf("unable to get waiter: %w", err) } // Watch hook resources until they have completed err = waiter.WatchUntilReady(resources, timeout) diff --git a/pkg/action/release_testing.go b/pkg/action/release_testing.go index 2aa063515..673a678f5 100644 --- a/pkg/action/release_testing.go +++ b/pkg/action/release_testing.go @@ -57,24 +57,24 @@ func NewReleaseTesting(cfg *Configuration) *ReleaseTesting { } // Run executes 'helm test' against the given release. 
-func (r *ReleaseTesting) Run(name string) (ri.Releaser, ExecuteShutdownHooks, error) { +func (r *ReleaseTesting) Run(name string) (ri.Releaser, executeShutdownFunc, error) { if err := r.cfg.KubeClient.IsReachable(); err != nil { - return nil, ShutdownNoOp, err + return nil, shutdownNoOp, err } if err := chartutil.ValidateReleaseName(name); err != nil { - return nil, ShutdownNoOp, fmt.Errorf("releaseTest: Release name is invalid: %s", name) + return nil, shutdownNoOp, fmt.Errorf("releaseTest: Release name is invalid: %s", name) } // finds the non-deleted release with the given name reli, err := r.cfg.Releases.Last(name) if err != nil { - return reli, ShutdownNoOp, err + return reli, shutdownNoOp, err } rel, err := releaserToV1Release(reli) if err != nil { - return reli, ShutdownNoOp, err + return reli, shutdownNoOp, err } skippedHooks := []*release.Hook{} From 45c5f3aaca1a37d8388ce7b79efe2dfaf84dcdcc Mon Sep 17 00:00:00 2001 From: Benoit Tigeot Date: Thu, 27 Nov 2025 08:48:31 +0100 Subject: [PATCH 08/18] Deal with golint warning with private executeShutdownFunc ``` Error: pkg/action/release_testing.go:60:57: unexported-return: exported method Run returns unexported type action.executeShutdownFunc, which can be annoying to use (revive) func (r *ReleaseTesting) Run(name string) (ri.Releaser, executeShutdownFunc, error) { ``` Signed-off-by: Benoit Tigeot --- pkg/action/hooks.go | 4 ++-- pkg/action/release_testing.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/action/hooks.go b/pkg/action/hooks.go index 2ea851a3e..49849a27d 100644 --- a/pkg/action/hooks.go +++ b/pkg/action/hooks.go @@ -46,14 +46,14 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, return shutdown() } -type executeShutdownFunc = func() error +type ExecuteShutdownFunc = func() error func shutdownNoOp() error { return nil } // execHookWithDelayedShutdown executes all of the hooks for the given hook event and returns a shutdownHook function to trigger deletions after doing other things like e.g. retrieving logs. -func (cfg *Configuration) execHookWithDelayedShutdown(rl *release.Release, hook release.HookEvent, waitStrategy kube.WaitStrategy, timeout time.Duration, serverSideApply bool) (executeShutdownFunc, error) { +func (cfg *Configuration) execHookWithDelayedShutdown(rl *release.Release, hook release.HookEvent, waitStrategy kube.WaitStrategy, timeout time.Duration, serverSideApply bool) (ExecuteShutdownFunc, error) { executingHooks := []*release.Hook{} for _, h := range rl.Hooks { diff --git a/pkg/action/release_testing.go b/pkg/action/release_testing.go index 673a678f5..992cdd701 100644 --- a/pkg/action/release_testing.go +++ b/pkg/action/release_testing.go @@ -57,7 +57,7 @@ func NewReleaseTesting(cfg *Configuration) *ReleaseTesting { } // Run executes 'helm test' against the given release. 
-func (r *ReleaseTesting) Run(name string) (ri.Releaser, executeShutdownFunc, error) { +func (r *ReleaseTesting) Run(name string) (ri.Releaser, ExecuteShutdownFunc, error) { if err := r.cfg.KubeClient.IsReachable(); err != nil { return nil, shutdownNoOp, err } From 5196b8431a40b9bd9e2e4aadb757b99bec11853f Mon Sep 17 00:00:00 2001 From: tison Date: Tue, 28 Oct 2025 17:37:22 +0800 Subject: [PATCH 09/18] Check plugin name is not used Signed-off-by: tison --- pkg/cmd/load_plugins.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/pkg/cmd/load_plugins.go b/pkg/cmd/load_plugins.go index ddda5fde6..d4bf7e7e3 100644 --- a/pkg/cmd/load_plugins.go +++ b/pkg/cmd/load_plugins.go @@ -132,7 +132,13 @@ func loadCLIPlugins(baseCmd *cobra.Command, out io.Writer) { DisableFlagParsing: true, } - // TODO: Make sure a command with this name does not already exist. + for _, cmd := range baseCmd.Commands() { + if cmd.Name() == c.Name() { + fmt.Fprintf(os.Stderr, "failed to load plugins: name conflicts %s\n", c.Name()) + return + } + } + baseCmd.AddCommand(c) // For completion, we try to load more details about the plugins so as to allow for command and From 3c6557dcf57ef4feee0d4840d4095f1479a63b40 Mon Sep 17 00:00:00 2001 From: tison Date: Thu, 30 Oct 2025 22:12:29 +0800 Subject: [PATCH 10/18] Fix TestCliPluginExitCode Signed-off-by: tison --- pkg/cmd/plugin_test.go | 10 +++++----- .../helm/plugins/{env => shortenv}/completion.yaml | 2 +- .../helm/plugins/{env => shortenv}/plugin-name.sh | 0 .../helm/plugins/{env => shortenv}/plugin.yaml | 2 +- pkg/cmd/testdata/output/plugin_list_comp.txt | 2 +- pkg/cmd/testdata/output/plugin_repeat_comp.txt | 2 +- 6 files changed, 9 insertions(+), 9 deletions(-) rename pkg/cmd/testdata/helmhome/helm/plugins/{env => shortenv}/completion.yaml (89%) rename pkg/cmd/testdata/helmhome/helm/plugins/{env => shortenv}/plugin-name.sh (100%) rename pkg/cmd/testdata/helmhome/helm/plugins/{env => shortenv}/plugin.yaml (93%) diff --git a/pkg/cmd/plugin_test.go b/pkg/cmd/plugin_test.go index 05cfe46f1..a250ba221 100644 --- a/pkg/cmd/plugin_test.go +++ b/pkg/cmd/plugin_test.go @@ -114,9 +114,9 @@ func TestLoadCLIPlugins(t *testing.T) { }{ {"args", "echo args", "This echos args", "-a -b -c\n", []string{"-a", "-b", "-c"}, 0}, {"echo", "echo stuff", "This echos stuff", "hello\n", []string{}, 0}, - {"env", "env stuff", "show the env", "HELM_PLUGIN_NAME=env\n", []string{}, 0}, {"exitwith", "exitwith code", "This exits with the specified exit code", "", []string{"2"}, 2}, {"fullenv", "show env vars", "show all env vars", fullEnvOutput, []string{}, 0}, + {"shortenv", "env stuff", "show the env", "HELM_PLUGIN_NAME=shortenv\n", []string{}, 0}, } pluginCmds := cmd.Commands() @@ -254,10 +254,6 @@ func TestLoadCLIPluginsForCompletion(t *testing.T) { tests := []staticCompletionDetails{ {"args", []string{}, []string{}, []staticCompletionDetails{}}, {"echo", []string{}, []string{}, []staticCompletionDetails{}}, - {"env", []string{}, []string{"global"}, []staticCompletionDetails{ - {"list", []string{}, []string{"a", "all", "log"}, []staticCompletionDetails{}}, - {"remove", []string{"all", "one"}, []string{}, []staticCompletionDetails{}}, - }}, {"exitwith", []string{}, []string{}, []staticCompletionDetails{ {"code", []string{}, []string{"a", "b"}, []staticCompletionDetails{}}, }}, @@ -268,6 +264,10 @@ func TestLoadCLIPluginsForCompletion(t *testing.T) { {"more", []string{"one", "two"}, []string{"b", "ball"}, []staticCompletionDetails{}}, }}, }}, + {"shortenv", []string{}, 
[]string{"global"}, []staticCompletionDetails{ + {"list", []string{}, []string{"a", "all", "log"}, []staticCompletionDetails{}}, + {"remove", []string{"all", "one"}, []string{}, []staticCompletionDetails{}}, + }}, } checkCommand(t, cmd.Commands(), tests) } diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/env/completion.yaml b/pkg/cmd/testdata/helmhome/helm/plugins/shortenv/completion.yaml similarity index 89% rename from pkg/cmd/testdata/helmhome/helm/plugins/env/completion.yaml rename to pkg/cmd/testdata/helmhome/helm/plugins/shortenv/completion.yaml index e479a0503..027573ed4 100644 --- a/pkg/cmd/testdata/helmhome/helm/plugins/env/completion.yaml +++ b/pkg/cmd/testdata/helmhome/helm/plugins/shortenv/completion.yaml @@ -1,4 +1,4 @@ -name: env +name: shortenv commands: - name: list flags: diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/env/plugin-name.sh b/pkg/cmd/testdata/helmhome/helm/plugins/shortenv/plugin-name.sh similarity index 100% rename from pkg/cmd/testdata/helmhome/helm/plugins/env/plugin-name.sh rename to pkg/cmd/testdata/helmhome/helm/plugins/shortenv/plugin-name.sh diff --git a/pkg/cmd/testdata/helmhome/helm/plugins/env/plugin.yaml b/pkg/cmd/testdata/helmhome/helm/plugins/shortenv/plugin.yaml similarity index 93% rename from pkg/cmd/testdata/helmhome/helm/plugins/env/plugin.yaml rename to pkg/cmd/testdata/helmhome/helm/plugins/shortenv/plugin.yaml index 78a0a23fb..5fe053ed0 100644 --- a/pkg/cmd/testdata/helmhome/helm/plugins/env/plugin.yaml +++ b/pkg/cmd/testdata/helmhome/helm/plugins/shortenv/plugin.yaml @@ -1,6 +1,6 @@ --- apiVersion: v1 -name: env +name: shortenv type: cli/v1 runtime: subprocess config: diff --git a/pkg/cmd/testdata/output/plugin_list_comp.txt b/pkg/cmd/testdata/output/plugin_list_comp.txt index 833efc5e9..1dff43551 100644 --- a/pkg/cmd/testdata/output/plugin_list_comp.txt +++ b/pkg/cmd/testdata/output/plugin_list_comp.txt @@ -1,7 +1,7 @@ args echo args echo echo stuff -env env stuff exitwith exitwith code fullenv show env vars +shortenv env stuff :4 Completion ended with directive: ShellCompDirectiveNoFileComp diff --git a/pkg/cmd/testdata/output/plugin_repeat_comp.txt b/pkg/cmd/testdata/output/plugin_repeat_comp.txt index 3fa05f0b3..b46c1b7d4 100644 --- a/pkg/cmd/testdata/output/plugin_repeat_comp.txt +++ b/pkg/cmd/testdata/output/plugin_repeat_comp.txt @@ -1,6 +1,6 @@ echo echo stuff -env env stuff exitwith exitwith code fullenv show env vars +shortenv env stuff :4 Completion ended with directive: ShellCompDirectiveNoFileComp From 9772037ed8a9e273b8c125fbf39bc7254ac53b05 Mon Sep 17 00:00:00 2001 From: tison Date: Mon, 15 Dec 2025 13:37:55 +0800 Subject: [PATCH 11/18] Update to use slog Signed-off-by: tison --- pkg/cmd/load_plugins.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/cmd/load_plugins.go b/pkg/cmd/load_plugins.go index d4bf7e7e3..7bdbf597d 100644 --- a/pkg/cmd/load_plugins.go +++ b/pkg/cmd/load_plugins.go @@ -134,7 +134,7 @@ func loadCLIPlugins(baseCmd *cobra.Command, out io.Writer) { for _, cmd := range baseCmd.Commands() { if cmd.Name() == c.Name() { - fmt.Fprintf(os.Stderr, "failed to load plugins: name conflicts %s\n", c.Name()) + slog.Error("failed to load plugins: name conflicts", slog.String("name", c.Name())) return } } From 374aeb4b4e0463f72e3a0175138ed4bf7e87a156 Mon Sep 17 00:00:00 2001 From: Feruzjon Muyassarov Date: Wed, 22 Oct 2025 11:00:54 +0300 Subject: [PATCH 12/18] Fix rollback for missing resources Signed-off-by: Feruzjon Muyassarov --- pkg/kube/client.go | 31 +++++++++++++++++++++++++++-- 
pkg/kube/client_test.go | 44 +++++++++++++++++++++++++++++++++++++---- 2 files changed, 69 insertions(+), 6 deletions(-) diff --git a/pkg/kube/client.go b/pkg/kube/client.go index d348423ab..e3937a923 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -532,7 +532,32 @@ func (c *Client) update(originals, targets ResourceList, updateApplyFunc UpdateA original := originals.Get(target) if original == nil { kind := target.Mapping.GroupVersionKind.Kind - return fmt.Errorf("original object %s with the name %q not found", kind, target.Name) + + slog.Warn("resource exists on cluster but not in original release, using cluster state as baseline", + "namespace", target.Namespace, "name", target.Name, "kind", kind) + + currentObj, err := helper.Get(target.Namespace, target.Name) + if err != nil { + return fmt.Errorf("original object %s with the name %q not found", kind, target.Name) + } + + // Create a temporary Info with the current cluster state to use as "original" + currentInfo := &resource.Info{ + Client: target.Client, + Mapping: target.Mapping, + Namespace: target.Namespace, + Name: target.Name, + Object: currentObj, + } + + if err := updateApplyFunc(currentInfo, target); err != nil { + updateErrors = append(updateErrors, err) + } + + // Because we check for errors later, append the info regardless + res.Updated = append(res.Updated, target) + + return nil } if err := updateApplyFunc(original, target); err != nil { @@ -570,7 +595,9 @@ func (c *Client) update(originals, targets ResourceList, updateApplyFunc UpdateA if err := deleteResource(info, metav1.DeletePropagationBackground); err != nil { slog.Debug("failed to delete resource", "namespace", info.Namespace, "name", info.Name, "kind", info.Mapping.GroupVersionKind.Kind, slog.Any("error", err)) if !apierrors.IsNotFound(err) { - updateErrors = append(updateErrors, fmt.Errorf("failed to delete resource %s: %w", info.Name, err)) + updateErrors = append(updateErrors, fmt.Errorf( + "failed to delete resource namespace=%s, name=%s, kind=%s: %w", + info.Namespace, info.Name, info.Mapping.GroupVersionKind.Kind, err)) } continue } diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index 3934171be..cc34e3360 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -412,7 +412,25 @@ func TestUpdate(t *testing.T) { "/namespaces/default/pods/forbidden:GET", "/namespaces/default/pods/forbidden:DELETE", ), - ExpectedError: "failed to delete resource forbidden:", + ExpectedError: "failed to delete resource namespace=default, name=forbidden, kind=Pod:", + }, + "rollback after failed upgrade with removed resource": { + // Simulates rollback scenario: + // - Revision 1 had "newpod" + // - Revision 2 removed "newpod" but upgrade failed (OriginalPods is empty) + // - Cluster still has "newpod" from Revision 1 + // - Rolling back to Revision 1 (TargetPods with "newpod") should succeed + OriginalPods: v1.PodList{}, // Revision 2 (failed) - resource was removed + TargetPods: newPodList("newpod"), // Revision 1 - rolling back to this + ThreeWayMergeForUnstructured: false, + ServerSideApply: true, + ExpectedActions: []string{ + "/namespaces/default/pods/newpod:GET", // Check if resource exists + "/namespaces/default/pods/newpod:GET", // Get current state (first call in update path) + "/namespaces/default/pods/newpod:GET", // Get current cluster state to use as baseline + "/namespaces/default/pods/newpod:PATCH", // Update using cluster state as baseline + }, + ExpectedError: "", }, } @@ -429,6 +447,10 @@ func TestUpdate(t *testing.T) { 
p, m := req.URL.Path, req.Method switch { + case p == "/namespaces/default/pods/newpod" && m == http.MethodGet: + return newResponse(http.StatusOK, &listTarget.Items[0]) + case p == "/namespaces/default/pods/newpod" && m == http.MethodPatch: + return newResponse(http.StatusOK, &listTarget.Items[0]) case p == "/namespaces/default/pods/starfish" && m == http.MethodGet: return newResponse(http.StatusOK, &listOriginal.Items[0]) case p == "/namespaces/default/pods/otter" && m == http.MethodGet: @@ -519,9 +541,23 @@ func TestUpdate(t *testing.T) { require.NoError(t, err) } - assert.Len(t, result.Created, 1, "expected 1 resource created, got %d", len(result.Created)) - assert.Len(t, result.Updated, 2, "expected 2 resource updated, got %d", len(result.Updated)) - assert.Len(t, result.Deleted, 1, "expected 1 resource deleted, got %d", len(result.Deleted)) + // Special handling for the rollback test case + if name == "rollback after failed upgrade with removed resource" { + assert.Len(t, result.Created, 0, "expected 0 resource created, got %d", len(result.Created)) + assert.Len(t, result.Updated, 1, "expected 1 resource updated, got %d", len(result.Updated)) + assert.Len(t, result.Deleted, 0, "expected 0 resource deleted, got %d", len(result.Deleted)) + } else { + assert.Len(t, result.Created, 1, "expected 1 resource created, got %d", len(result.Created)) + assert.Len(t, result.Updated, 2, "expected 2 resource updated, got %d", len(result.Updated)) + assert.Len(t, result.Deleted, 1, "expected 1 resource deleted, got %d", len(result.Deleted)) + } + + if tc.ExpectedError != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tc.ExpectedError) + } else { + require.NoError(t, err) + } actions := []string{} for _, action := range client.Actions { From e751a70e84175212d9338738122d045aecb2ad89 Mon Sep 17 00:00:00 2001 From: Evans Mungai Date: Fri, 9 Jan 2026 19:32:29 +0000 Subject: [PATCH 13/18] fix(release): fix test compilation error Signed-off-by: Evans Mungai --- pkg/action/release_testing_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/action/release_testing_test.go b/pkg/action/release_testing_test.go index 1ada78a4b..bece0b475 100644 --- a/pkg/action/release_testing_test.go +++ b/pkg/action/release_testing_test.go @@ -46,7 +46,7 @@ func TestReleaseTestingRun_UnreachableKubeClient(t *testing.T) { config.KubeClient = &failingKubeClient client := NewReleaseTesting(config) - result, err := client.Run("") + result, _, err := client.Run("") assert.Nil(t, result) assert.Error(t, err) } From 46e52649eaf7702b37f38302a318171ee88f4710 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 9 Jan 2026 21:37:20 +0000 Subject: [PATCH 14/18] chore(deps): bump golang.org/x/text from 0.32.0 to 0.33.0 Bumps [golang.org/x/text](https://github.com/golang/text) from 0.32.0 to 0.33.0. - [Release notes](https://github.com/golang/text/releases) - [Commits](https://github.com/golang/text/compare/v0.32.0...v0.33.0) --- updated-dependencies: - dependency-name: golang.org/x/text dependency-version: 0.33.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 8 ++++---- go.sum | 16 ++++++++-------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index 8b5b2f4a3..8b4b44af8 100644 --- a/go.mod +++ b/go.mod @@ -37,7 +37,7 @@ require ( go.yaml.in/yaml/v3 v3.0.4 golang.org/x/crypto v0.46.0 golang.org/x/term v0.38.0 - golang.org/x/text v0.32.0 + golang.org/x/text v0.33.0 gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/api v0.35.0 k8s.io/apiextensions-apiserver v0.35.0 @@ -157,13 +157,13 @@ require ( go.opentelemetry.io/otel/trace v1.37.0 // indirect go.opentelemetry.io/proto/otlp v1.5.0 // indirect go.yaml.in/yaml/v2 v2.4.3 // indirect - golang.org/x/mod v0.30.0 // indirect - golang.org/x/net v0.47.0 // indirect + golang.org/x/mod v0.31.0 // indirect + golang.org/x/net v0.48.0 // indirect golang.org/x/oauth2 v0.30.0 // indirect golang.org/x/sync v0.19.0 // indirect golang.org/x/sys v0.39.0 // indirect golang.org/x/time v0.12.0 // indirect - golang.org/x/tools v0.39.0 // indirect + golang.org/x/tools v0.40.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a // indirect google.golang.org/grpc v1.72.2 // indirect diff --git a/go.sum b/go.sum index 4e39db6d8..b0b582ef8 100644 --- a/go.sum +++ b/go.sum @@ -382,8 +382,8 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91 golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= -golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= +golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI= +golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -394,8 +394,8 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= -golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= -golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= +golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -443,8 +443,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod 
h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= -golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= +golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE= +golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8= golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -453,8 +453,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk= -golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= -golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= +golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA= +golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950= google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg= From de0becdc57de8e39f44d2b0f645eba2e3272ad5a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 10 Jan 2026 10:33:37 +0000 Subject: [PATCH 15/18] chore(deps): bump golang.org/x/term from 0.38.0 to 0.39.0 Bumps [golang.org/x/term](https://github.com/golang/term) from 0.38.0 to 0.39.0. - [Commits](https://github.com/golang/term/compare/v0.38.0...v0.39.0) --- updated-dependencies: - dependency-name: golang.org/x/term dependency-version: 0.39.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 8b4b44af8..81162118a 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( github.com/tetratelabs/wazero v1.11.0 go.yaml.in/yaml/v3 v3.0.4 golang.org/x/crypto v0.46.0 - golang.org/x/term v0.38.0 + golang.org/x/term v0.39.0 golang.org/x/text v0.33.0 gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/api v0.35.0 @@ -161,7 +161,7 @@ require ( golang.org/x/net v0.48.0 // indirect golang.org/x/oauth2 v0.30.0 // indirect golang.org/x/sync v0.19.0 // indirect - golang.org/x/sys v0.39.0 // indirect + golang.org/x/sys v0.40.0 // indirect golang.org/x/time v0.12.0 // indirect golang.org/x/tools v0.40.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect diff --git a/go.sum b/go.sum index b0b582ef8..abeb76478 100644 --- a/go.sum +++ b/go.sum @@ -425,8 +425,8 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= -golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= @@ -434,8 +434,8 @@ golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww= -golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= -golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= +golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY= +golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= From 59ece92bed72767332ddc827c5548e7355ffb09a Mon Sep 17 00:00:00 2001 From: Matheus Pimenta Date: Fri, 9 Jan 2026 01:18:33 +0000 Subject: [PATCH 16/18] pkg/kube: introduce support for custom kstatus readers Signed-off-by: Matheus Pimenta --- pkg/action/hooks.go | 44 +++-- pkg/action/hooks_test.go | 55 +++++- pkg/action/install.go | 21 +- pkg/action/install_test.go | 22 +++ pkg/action/release_testing.go | 7 +- pkg/action/release_testing_test.go | 28 +++ pkg/action/rollback.go | 12 +- pkg/action/rollback_test.go | 40 ++++ pkg/action/uninstall.go | 13 +- pkg/action/uninstall_test.go | 38 ++++ pkg/action/upgrade.go | 14 +- pkg/action/upgrade_test.go | 27 +++ pkg/kube/client.go | 31 ++- pkg/kube/client_test.go | 123 ++++++++++-- 
pkg/kube/fake/failing_kube_client.go | 10 +- pkg/kube/fake/printer.go | 6 +- pkg/kube/interface.go | 13 +- pkg/kube/options.go | 45 +++++ pkg/kube/statuswait.go | 15 +- pkg/kube/statuswait_test.go | 277 ++++++++++++++++++++++++++- 20 files changed, 782 insertions(+), 59 deletions(-) create mode 100644 pkg/kube/options.go diff --git a/pkg/action/hooks.go b/pkg/action/hooks.go index 49849a27d..28033395b 100644 --- a/pkg/action/hooks.go +++ b/pkg/action/hooks.go @@ -32,8 +32,11 @@ import ( ) // execHook executes all of the hooks for the given hook event. -func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, waitStrategy kube.WaitStrategy, timeout time.Duration, serverSideApply bool) error { - shutdown, err := cfg.execHookWithDelayedShutdown(rl, hook, waitStrategy, timeout, serverSideApply) +func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, + waitStrategy kube.WaitStrategy, waitOptions []kube.WaitOption, + timeout time.Duration, serverSideApply bool) error { + + shutdown, err := cfg.execHookWithDelayedShutdown(rl, hook, waitStrategy, waitOptions, timeout, serverSideApply) if shutdown == nil { return err } @@ -53,7 +56,10 @@ func shutdownNoOp() error { } // execHookWithDelayedShutdown executes all of the hooks for the given hook event and returns a shutdownHook function to trigger deletions after doing other things like e.g. retrieving logs. -func (cfg *Configuration) execHookWithDelayedShutdown(rl *release.Release, hook release.HookEvent, waitStrategy kube.WaitStrategy, timeout time.Duration, serverSideApply bool) (ExecuteShutdownFunc, error) { +func (cfg *Configuration) execHookWithDelayedShutdown(rl *release.Release, hook release.HookEvent, + waitStrategy kube.WaitStrategy, waitOptions []kube.WaitOption, timeout time.Duration, + serverSideApply bool) (ExecuteShutdownFunc, error) { + executingHooks := []*release.Hook{} for _, h := range rl.Hooks { @@ -71,7 +77,7 @@ func (cfg *Configuration) execHookWithDelayedShutdown(rl *release.Release, hook // Set default delete policy to before-hook-creation cfg.hookSetDeletePolicy(h) - if err := cfg.deleteHookByPolicy(h, release.HookBeforeHookCreation, waitStrategy, timeout); err != nil { + if err := cfg.deleteHookByPolicy(h, release.HookBeforeHookCreation, waitStrategy, waitOptions, timeout); err != nil { return shutdownNoOp, err } @@ -101,7 +107,12 @@ func (cfg *Configuration) execHookWithDelayedShutdown(rl *release.Release, hook return shutdownNoOp, fmt.Errorf("warning: Hook %s %s failed: %w", hook, h.Path, err) } - waiter, err := cfg.KubeClient.GetWaiter(waitStrategy) + var waiter kube.Waiter + if c, supportsOptions := cfg.KubeClient.(kube.InterfaceWaitOptions); supportsOptions { + waiter, err = c.GetWaiterWithOptions(waitStrategy, waitOptions...) + } else { + waiter, err = cfg.KubeClient.GetWaiter(waitStrategy) + } if err != nil { return shutdownNoOp, fmt.Errorf("unable to get waiter: %w", err) } @@ -120,14 +131,14 @@ func (cfg *Configuration) execHookWithDelayedShutdown(rl *release.Release, hook // If a hook is failed, check the annotation of the hook to determine whether the hook should be deleted // under failed condition. 
If so, then clear the corresponding resource object in the hook return func() error { - if errDeleting := cfg.deleteHookByPolicy(h, release.HookFailed, waitStrategy, timeout); errDeleting != nil { + if errDeleting := cfg.deleteHookByPolicy(h, release.HookFailed, waitStrategy, waitOptions, timeout); errDeleting != nil { // We log the error here as we want to propagate the hook failure upwards to the release object. log.Printf("error deleting the hook resource on hook failure: %v", errDeleting) } // If a hook is failed, check the annotation of the previous successful hooks to determine whether the hooks // should be deleted under succeeded condition. - if err := cfg.deleteHooksByPolicy(executingHooks[0:i], release.HookSucceeded, waitStrategy, timeout); err != nil { + if err := cfg.deleteHooksByPolicy(executingHooks[0:i], release.HookSucceeded, waitStrategy, waitOptions, timeout); err != nil { return err } return err @@ -145,7 +156,7 @@ func (cfg *Configuration) execHookWithDelayedShutdown(rl *release.Release, hook // We log here as we still want to attempt hook resource deletion even if output logging fails. log.Printf("error outputting logs for hook failure: %v", err) } - if err := cfg.deleteHookByPolicy(h, release.HookSucceeded, waitStrategy, timeout); err != nil { + if err := cfg.deleteHookByPolicy(h, release.HookSucceeded, waitStrategy, waitOptions, timeout); err != nil { return err } } @@ -166,7 +177,9 @@ func (x hookByWeight) Less(i, j int) bool { } // deleteHookByPolicy deletes a hook if the hook policy instructs it to -func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.HookDeletePolicy, waitStrategy kube.WaitStrategy, timeout time.Duration) error { +func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.HookDeletePolicy, + waitStrategy kube.WaitStrategy, waitOptions []kube.WaitOption, timeout time.Duration) error { + // Never delete CustomResourceDefinitions; this could cause lots of // cascading garbage collection. if h.Kind == "CustomResourceDefinition" { @@ -182,7 +195,12 @@ func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.Hoo return joinErrors(errs, "; ") } - waiter, err := cfg.KubeClient.GetWaiter(waitStrategy) + var waiter kube.Waiter + if c, supportsOptions := cfg.KubeClient.(kube.InterfaceWaitOptions); supportsOptions { + waiter, err = c.GetWaiterWithOptions(waitStrategy, waitOptions...) 
+ } else { + waiter, err = cfg.KubeClient.GetWaiter(waitStrategy) + } if err != nil { return err } @@ -194,9 +212,11 @@ func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.Hoo } // deleteHooksByPolicy deletes all hooks if the hook policy instructs it to -func (cfg *Configuration) deleteHooksByPolicy(hooks []*release.Hook, policy release.HookDeletePolicy, waitStrategy kube.WaitStrategy, timeout time.Duration) error { +func (cfg *Configuration) deleteHooksByPolicy(hooks []*release.Hook, policy release.HookDeletePolicy, + waitStrategy kube.WaitStrategy, waitOptions []kube.WaitOption, timeout time.Duration) error { + for _, h := range hooks { - if err := cfg.deleteHookByPolicy(h, policy, waitStrategy, timeout); err != nil { + if err := cfg.deleteHookByPolicy(h, policy, waitStrategy, waitOptions, timeout); err != nil { return err } } diff --git a/pkg/action/hooks_test.go b/pkg/action/hooks_test.go index 02b70dda1..0270a0630 100644 --- a/pkg/action/hooks_test.go +++ b/pkg/action/hooks_test.go @@ -18,6 +18,7 @@ package action import ( "bytes" + "context" "fmt" "io" "reflect" @@ -278,8 +279,8 @@ func (h *HookFailingKubeClient) Delete(resources kube.ResourceList, deletionProp return h.PrintingKubeClient.Delete(resources, deletionPropagation) } -func (h *HookFailingKubeClient) GetWaiter(strategy kube.WaitStrategy) (kube.Waiter, error) { - waiter, _ := h.PrintingKubeClient.GetWaiter(strategy) +func (h *HookFailingKubeClient) GetWaiterWithOptions(strategy kube.WaitStrategy, opts ...kube.WaitOption) (kube.Waiter, error) { + waiter, _ := h.PrintingKubeClient.GetWaiterWithOptions(strategy, opts...) return &HookFailingKubeWaiter{ PrintingKubeWaiter: waiter.(*kubefake.PrintingKubeWaiter), failOn: h.failOn, @@ -394,7 +395,7 @@ data: } serverSideApply := true - err := configuration.execHook(&tc.inputRelease, hookEvent, kube.StatusWatcherStrategy, 600, serverSideApply) + err := configuration.execHook(&tc.inputRelease, hookEvent, kube.StatusWatcherStrategy, nil, 600, serverSideApply) if !reflect.DeepEqual(kubeClient.deleteRecord, tc.expectedDeleteRecord) { t.Fatalf("Got unexpected delete record, expected: %#v, but got: %#v", kubeClient.deleteRecord, tc.expectedDeleteRecord) @@ -442,3 +443,51 @@ func TestConfiguration_hookSetDeletePolicy(t *testing.T) { }) } } + +func TestExecHook_WaitOptionsPassedDownstream(t *testing.T) { + is := assert.New(t) + + failer := &kubefake.FailingKubeClient{ + PrintingKubeClient: kubefake.PrintingKubeClient{Out: io.Discard}, + } + + configuration := &Configuration{ + Releases: storage.Init(driver.NewMemory()), + KubeClient: failer, + Capabilities: common.DefaultCapabilities, + } + + rel := &release.Release{ + Name: "test-release", + Namespace: "test", + Hooks: []*release.Hook{ + { + Name: "test-hook", + Kind: "ConfigMap", + Path: "templates/hook.yaml", + Manifest: `apiVersion: v1 +kind: ConfigMap +metadata: + name: test-hook + namespace: test +data: + foo: bar +`, + Weight: 0, + Events: []release.HookEvent{ + release.HookPreInstall, + }, + }, + }, + } + + // Use WithWaitContext as a marker WaitOption that we can track + ctx := context.Background() + waitOptions := []kube.WaitOption{kube.WithWaitContext(ctx)} + + err := configuration.execHook(rel, release.HookPreInstall, kube.StatusWatcherStrategy, waitOptions, 600, false) + is.NoError(err) + + // Verify that WaitOptions were passed to GetWaiter + is.NotEmpty(failer.RecordedWaitOptions, "WaitOptions should be passed to GetWaiter") +} diff --git a/pkg/action/install.go b/pkg/action/install.go index 
b379d6873..38355491a 100644 --- a/pkg/action/install.go +++ b/pkg/action/install.go @@ -95,6 +95,7 @@ type Install struct { DisableHooks bool Replace bool WaitStrategy kube.WaitStrategy + WaitOptions []kube.WaitOption WaitForJobs bool Devel bool DependencyUpdate bool @@ -201,7 +202,13 @@ func (i *Install) installCRDs(crds []chart.CRD) error { totalItems = append(totalItems, res...) } if len(totalItems) > 0 { - waiter, err := i.cfg.KubeClient.GetWaiter(i.WaitStrategy) + var waiter kube.Waiter + var err error + if c, supportsOptions := i.cfg.KubeClient.(kube.InterfaceWaitOptions); supportsOptions { + waiter, err = c.GetWaiterWithOptions(i.WaitStrategy, i.WaitOptions...) + } else { + waiter, err = i.cfg.KubeClient.GetWaiter(i.WaitStrategy) + } if err != nil { return fmt.Errorf("unable to get waiter: %w", err) } @@ -480,7 +487,7 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource var err error // pre-install hooks if !i.DisableHooks { - if err := i.cfg.execHook(rel, release.HookPreInstall, i.WaitStrategy, i.Timeout, i.ServerSideApply); err != nil { + if err := i.cfg.execHook(rel, release.HookPreInstall, i.WaitStrategy, i.WaitOptions, i.Timeout, i.ServerSideApply); err != nil { return rel, fmt.Errorf("failed pre-install: %s", err) } } @@ -506,7 +513,12 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource return rel, err } - waiter, err := i.cfg.KubeClient.GetWaiter(i.WaitStrategy) + var waiter kube.Waiter + if c, supportsOptions := i.cfg.KubeClient.(kube.InterfaceWaitOptions); supportsOptions { + waiter, err = c.GetWaiterWithOptions(i.WaitStrategy, i.WaitOptions...) + } else { + waiter, err = i.cfg.KubeClient.GetWaiter(i.WaitStrategy) + } if err != nil { return rel, fmt.Errorf("failed to get waiter: %w", err) } @@ -521,7 +533,7 @@ func (i *Install) performInstall(rel *release.Release, toBeAdopted kube.Resource } if !i.DisableHooks { - if err := i.cfg.execHook(rel, release.HookPostInstall, i.WaitStrategy, i.Timeout, i.ServerSideApply); err != nil { + if err := i.cfg.execHook(rel, release.HookPostInstall, i.WaitStrategy, i.WaitOptions, i.Timeout, i.ServerSideApply); err != nil { return rel, fmt.Errorf("failed post-install: %s", err) } } @@ -555,6 +567,7 @@ func (i *Install) failRelease(rel *release.Release, err error) (*release.Release uninstall.KeepHistory = false uninstall.Timeout = i.Timeout uninstall.WaitStrategy = i.WaitStrategy + uninstall.WaitOptions = i.WaitOptions if _, uninstallErr := uninstall.Run(i.ReleaseName); uninstallErr != nil { return rel, fmt.Errorf("an error occurred while uninstalling the release. 
original install error: %w: %w", err, uninstallErr) } diff --git a/pkg/action/install_test.go b/pkg/action/install_test.go index 9a0ca8d22..47080aef8 100644 --- a/pkg/action/install_test.go +++ b/pkg/action/install_test.go @@ -1186,3 +1186,25 @@ func TestCheckDependencies_MissingDependency(t *testing.T) { assert.ErrorContains(t, CheckDependencies(mockChart, []ci.Dependency{&dependency}), "missing in charts") } + +func TestInstallRelease_WaitOptionsPassedDownstream(t *testing.T) { + is := assert.New(t) + + instAction := installAction(t) + instAction.ReleaseName = "wait-options-test" + instAction.WaitStrategy = kube.StatusWatcherStrategy + + // Use WithWaitContext as a marker WaitOption that we can track + ctx := context.Background() + instAction.WaitOptions = []kube.WaitOption{kube.WithWaitContext(ctx)} + + // Access the underlying FailingKubeClient to check recorded options + failer := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient) + + vals := map[string]interface{}{} + _, err := instAction.Run(buildChart(), vals) + is.NoError(err) + + // Verify that WaitOptions were passed to GetWaiter + is.NotEmpty(failer.RecordedWaitOptions, "WaitOptions should be passed to GetWaiter") +} diff --git a/pkg/action/release_testing.go b/pkg/action/release_testing.go index 992cdd701..043a41236 100644 --- a/pkg/action/release_testing.go +++ b/pkg/action/release_testing.go @@ -41,8 +41,9 @@ const ( // // It provides the implementation of 'helm test'. type ReleaseTesting struct { - cfg *Configuration - Timeout time.Duration + cfg *Configuration + Timeout time.Duration + WaitOptions []kube.WaitOption // Used for fetching logs from test pods Namespace string Filters map[string][]string @@ -102,7 +103,7 @@ func (r *ReleaseTesting) Run(name string) (ri.Releaser, ExecuteShutdownFunc, err } serverSideApply := rel.ApplyMethod == string(release.ApplyMethodServerSideApply) - shutdown, err := r.cfg.execHookWithDelayedShutdown(rel, release.HookTest, kube.StatusWatcherStrategy, r.Timeout, serverSideApply) + shutdown, err := r.cfg.execHookWithDelayedShutdown(rel, release.HookTest, kube.StatusWatcherStrategy, r.WaitOptions, r.Timeout, serverSideApply) if err != nil { rel.Hooks = append(skippedHooks, rel.Hooks...) 
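
Patch 16 repeats the same fallback in hooks.go, install.go, rollback.go, and uninstall.go: prefer GetWaiterWithOptions when the client implements the optional kube.InterfaceWaitOptions extension, otherwise fall back to plain GetWaiter. A minimal sketch of that pattern factored into one helper — getWaiter is a hypothetical name, not part of this series, and it assumes the client satisfies the kube.Interface from pkg/kube/interface.go:

package action

import "helm.sh/helm/v4/pkg/kube"

// getWaiter resolves a Waiter for the given strategy, forwarding wait
// options when the client supports the optional extension interface and
// silently dropping them otherwise (matching the patch's behavior for
// clients that predate GetWaiterWithOptions).
func getWaiter(client kube.Interface, strategy kube.WaitStrategy, opts ...kube.WaitOption) (kube.Waiter, error) {
	if c, ok := client.(kube.InterfaceWaitOptions); ok {
		return c.GetWaiterWithOptions(strategy, opts...)
	}
	return client.GetWaiter(strategy)
}
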
diff --git a/pkg/action/release_testing_test.go b/pkg/action/release_testing_test.go index bece0b475..ab35e104a 100644 --- a/pkg/action/release_testing_test.go +++ b/pkg/action/release_testing_test.go @@ -18,6 +18,7 @@ package action import ( "bytes" + "context" "errors" "io" "os" @@ -27,6 +28,7 @@ import ( "github.com/stretchr/testify/require" "helm.sh/helm/v4/pkg/cli" + "helm.sh/helm/v4/pkg/kube" kubefake "helm.sh/helm/v4/pkg/kube/fake" release "helm.sh/helm/v4/pkg/release/v1" ) @@ -89,3 +91,29 @@ func TestReleaseTestingGetPodLogs_PodRetrievalError(t *testing.T) { require.ErrorContains(t, client.GetPodLogs(&bytes.Buffer{}, &release.Release{Hooks: hooks}), "unable to get pod logs") } + +func TestReleaseTesting_WaitOptionsPassedDownstream(t *testing.T) { + is := assert.New(t) + config := actionConfigFixture(t) + + // Create a release with a test hook + rel := releaseStub() + rel.Name = "wait-options-test-release" + rel.ApplyMethod = "csa" + require.NoError(t, config.Releases.Create(rel)) + + client := NewReleaseTesting(config) + + // Use WithWaitContext as a marker WaitOption that we can track + ctx := context.Background() + client.WaitOptions = []kube.WaitOption{kube.WithWaitContext(ctx)} + + // Access the underlying FailingKubeClient to check recorded options + failer := config.KubeClient.(*kubefake.FailingKubeClient) + + _, _, err := client.Run(rel.Name) + is.NoError(err) + + // Verify that WaitOptions were passed to GetWaiter + is.NotEmpty(failer.RecordedWaitOptions, "WaitOptions should be passed to GetWaiter") +} diff --git a/pkg/action/rollback.go b/pkg/action/rollback.go index 4cdb2d33b..03150532e 100644 --- a/pkg/action/rollback.go +++ b/pkg/action/rollback.go @@ -40,6 +40,7 @@ type Rollback struct { Version int Timeout time.Duration WaitStrategy kube.WaitStrategy + WaitOptions []kube.WaitOption WaitForJobs bool DisableHooks bool // DryRunStrategy can be set to prepare, but not execute the operation and whether or not to interact with the remote cluster @@ -210,7 +211,7 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas // pre-rollback hooks if !r.DisableHooks { - if err := r.cfg.execHook(targetRelease, release.HookPreRollback, r.WaitStrategy, r.Timeout, serverSideApply); err != nil { + if err := r.cfg.execHook(targetRelease, release.HookPreRollback, r.WaitStrategy, r.WaitOptions, r.Timeout, serverSideApply); err != nil { return targetRelease, err } } else { @@ -251,7 +252,12 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas return targetRelease, err } - waiter, err := r.cfg.KubeClient.GetWaiter(r.WaitStrategy) + var waiter kube.Waiter + if c, supportsOptions := r.cfg.KubeClient.(kube.InterfaceWaitOptions); supportsOptions { + waiter, err = c.GetWaiterWithOptions(r.WaitStrategy, r.WaitOptions...) 
+ } else { + waiter, err = r.cfg.KubeClient.GetWaiter(r.WaitStrategy) + } if err != nil { return nil, fmt.Errorf("unable to get waiter: %w", err) } @@ -273,7 +279,7 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas // post-rollback hooks if !r.DisableHooks { - if err := r.cfg.execHook(targetRelease, release.HookPostRollback, r.WaitStrategy, r.Timeout, serverSideApply); err != nil { + if err := r.cfg.execHook(targetRelease, release.HookPostRollback, r.WaitStrategy, r.WaitOptions, r.Timeout, serverSideApply); err != nil { return targetRelease, err } } diff --git a/pkg/action/rollback_test.go b/pkg/action/rollback_test.go index 5158bee26..deb6c7c80 100644 --- a/pkg/action/rollback_test.go +++ b/pkg/action/rollback_test.go @@ -17,12 +17,15 @@ limitations under the License. package action import ( + "context" "errors" "io" "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "helm.sh/helm/v4/pkg/kube" kubefake "helm.sh/helm/v4/pkg/kube/fake" ) @@ -43,3 +46,40 @@ func TestRollbackRun_UnreachableKubeClient(t *testing.T) { client := NewRollback(config) assert.Error(t, client.Run("")) } + +func TestRollback_WaitOptionsPassedDownstream(t *testing.T) { + is := assert.New(t) + config := actionConfigFixture(t) + + // Create a deployed release and a second version to roll back to + rel := releaseStub() + rel.Name = "wait-options-rollback" + rel.Info.Status = "deployed" + rel.ApplyMethod = "csa" + require.NoError(t, config.Releases.Create(rel)) + + rel2 := releaseStub() + rel2.Name = "wait-options-rollback" + rel2.Version = 2 + rel2.Info.Status = "deployed" + rel2.ApplyMethod = "csa" + require.NoError(t, config.Releases.Create(rel2)) + + client := NewRollback(config) + client.Version = 1 + client.WaitStrategy = kube.StatusWatcherStrategy + client.ServerSideApply = "auto" + + // Use WithWaitContext as a marker WaitOption that we can track + ctx := context.Background() + client.WaitOptions = []kube.WaitOption{kube.WithWaitContext(ctx)} + + // Access the underlying FailingKubeClient to check recorded options + failer := config.KubeClient.(*kubefake.FailingKubeClient) + + err := client.Run(rel.Name) + is.NoError(err) + + // Verify that WaitOptions were passed to GetWaiter + is.NotEmpty(failer.RecordedWaitOptions, "WaitOptions should be passed to GetWaiter") +} diff --git a/pkg/action/uninstall.go b/pkg/action/uninstall.go index efbc72fef..79156991c 100644 --- a/pkg/action/uninstall.go +++ b/pkg/action/uninstall.go @@ -45,6 +45,7 @@ type Uninstall struct { IgnoreNotFound bool KeepHistory bool WaitStrategy kube.WaitStrategy + WaitOptions []kube.WaitOption DeletionPropagation string Timeout time.Duration Description string @@ -63,7 +64,13 @@ func (u *Uninstall) Run(name string) (*releasei.UninstallReleaseResponse, error) return nil, err } - waiter, err := u.cfg.KubeClient.GetWaiter(u.WaitStrategy) + var waiter kube.Waiter + var err error + if c, supportsOptions := u.cfg.KubeClient.(kube.InterfaceWaitOptions); supportsOptions { + waiter, err = c.GetWaiterWithOptions(u.WaitStrategy, u.WaitOptions...) 
+ } else { + waiter, err = u.cfg.KubeClient.GetWaiter(u.WaitStrategy) + } if err != nil { return nil, err } @@ -127,7 +134,7 @@ func (u *Uninstall) Run(name string) (*releasei.UninstallReleaseResponse, error) if !u.DisableHooks { serverSideApply := true - if err := u.cfg.execHook(rel, release.HookPreDelete, u.WaitStrategy, u.Timeout, serverSideApply); err != nil { + if err := u.cfg.execHook(rel, release.HookPreDelete, u.WaitStrategy, u.WaitOptions, u.Timeout, serverSideApply); err != nil { return res, err } } else { @@ -157,7 +164,7 @@ func (u *Uninstall) Run(name string) (*releasei.UninstallReleaseResponse, error) if !u.DisableHooks { serverSideApply := true - if err := u.cfg.execHook(rel, release.HookPostDelete, u.WaitStrategy, u.Timeout, serverSideApply); err != nil { + if err := u.cfg.execHook(rel, release.HookPostDelete, u.WaitStrategy, u.WaitOptions, u.Timeout, serverSideApply); err != nil { errs = append(errs, err) } } diff --git a/pkg/action/uninstall_test.go b/pkg/action/uninstall_test.go index 6c4ad977f..b5a76d983 100644 --- a/pkg/action/uninstall_test.go +++ b/pkg/action/uninstall_test.go @@ -17,6 +17,7 @@ limitations under the License. package action import ( + "context" "errors" "fmt" "io" @@ -169,3 +170,40 @@ func TestUninstallRun_UnreachableKubeClient(t *testing.T) { assert.Nil(t, result) assert.ErrorContains(t, err, "connection refused") } + +func TestUninstall_WaitOptionsPassedDownstream(t *testing.T) { + is := assert.New(t) + + unAction := uninstallAction(t) + unAction.DisableHooks = true + unAction.DryRun = false + unAction.WaitStrategy = kube.StatusWatcherStrategy + + // Use WithWaitContext as a marker WaitOption that we can track + ctx := context.Background() + unAction.WaitOptions = []kube.WaitOption{kube.WithWaitContext(ctx)} + + rel := releaseStub() + rel.Name = "wait-options-uninstall" + rel.Manifest = `{ + "apiVersion": "v1", + "kind": "Secret", + "metadata": { + "name": "secret" + }, + "type": "Opaque", + "data": { + "password": "password" + } + }` + require.NoError(t, unAction.cfg.Releases.Create(rel)) + + // Access the underlying FailingKubeClient to check recorded options + failer := unAction.cfg.KubeClient.(*kubefake.FailingKubeClient) + + _, err := unAction.Run(rel.Name) + is.NoError(err) + + // Verify that WaitOptions were passed to GetWaiter + is.NotEmpty(failer.RecordedWaitOptions, "WaitOptions should be passed to GetWaiter") +} diff --git a/pkg/action/upgrade.go b/pkg/action/upgrade.go index 13d28fd4d..4b99be603 100644 --- a/pkg/action/upgrade.go +++ b/pkg/action/upgrade.go @@ -72,6 +72,8 @@ type Upgrade struct { Timeout time.Duration // WaitStrategy determines what type of waiting should be done WaitStrategy kube.WaitStrategy + // WaitOptions are additional options for waiting on resources + WaitOptions []kube.WaitOption // WaitForJobs determines whether the wait operation for the Jobs should be performed after the upgrade is requested. WaitForJobs bool // DisableHooks disables hook processing if set to true. 
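For callers holding a *kube.Client directly, the same options can be supplied without going through an action. A minimal sketch, assuming a configured client and an already-built ResourceList (the function and its package name are illustrative, not part of the patch):

package helmutil // hypothetical package, not part of the patch

import (
	"context"
	"time"

	"helm.sh/helm/v4/pkg/kube"
)

// waitWithCancel waits on resources using the status watcher, threading an
// external context through WithWaitContext so the wait can be cancelled
// independently of the timeout.
func waitWithCancel(ctx context.Context, c *kube.Client, resources kube.ResourceList) error {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	waiter, err := c.GetWaiterWithOptions(kube.StatusWatcherStrategy, kube.WithWaitContext(ctx))
	if err != nil {
		return err
	}
	// The timeout still applies; cancelling ctx simply ends the wait early.
	return waiter.Wait(resources, 2*time.Minute)
}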
@@ -452,7 +454,7 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele
 
 	// pre-upgrade hooks
 	if !u.DisableHooks {
-		if err := u.cfg.execHook(upgradedRelease, release.HookPreUpgrade, u.WaitStrategy, u.Timeout, serverSideApply); err != nil {
+		if err := u.cfg.execHook(upgradedRelease, release.HookPreUpgrade, u.WaitStrategy, u.WaitOptions, u.Timeout, serverSideApply); err != nil {
 			u.reportToPerformUpgrade(c, upgradedRelease, kube.ResourceList{}, fmt.Errorf("pre-upgrade hooks failed: %s", err))
 			return
 		}
@@ -473,7 +475,12 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele
 		return
 	}
 
-	waiter, err := u.cfg.KubeClient.GetWaiter(u.WaitStrategy)
+	var waiter kube.Waiter
+	if kc, supportsOptions := u.cfg.KubeClient.(kube.InterfaceWaitOptions); supportsOptions {
+		waiter, err = kc.GetWaiterWithOptions(u.WaitStrategy, u.WaitOptions...)
+	} else {
+		waiter, err = u.cfg.KubeClient.GetWaiter(u.WaitStrategy)
+	}
 	if err != nil {
 		u.cfg.recordRelease(originalRelease)
 		u.reportToPerformUpgrade(c, upgradedRelease, results.Created, err)
@@ -495,7 +502,7 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele
 
 	// post-upgrade hooks
 	if !u.DisableHooks {
-		if err := u.cfg.execHook(upgradedRelease, release.HookPostUpgrade, u.WaitStrategy, u.Timeout, serverSideApply); err != nil {
+		if err := u.cfg.execHook(upgradedRelease, release.HookPostUpgrade, u.WaitStrategy, u.WaitOptions, u.Timeout, serverSideApply); err != nil {
 			u.reportToPerformUpgrade(c, upgradedRelease, results.Created, fmt.Errorf("post-upgrade hooks failed: %s", err))
 			return
 		}
@@ -570,6 +577,7 @@ func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, e
 		rollin := NewRollback(u.cfg)
 		rollin.Version = filteredHistory[0].Version
 		rollin.WaitStrategy = u.WaitStrategy
+		rollin.WaitOptions = u.WaitOptions
 		rollin.WaitForJobs = u.WaitForJobs
 		rollin.DisableHooks = u.DisableHooks
 		rollin.ForceReplace = u.ForceReplace
diff --git a/pkg/action/upgrade_test.go b/pkg/action/upgrade_test.go
index 17c4927cc..848e8a682 100644
--- a/pkg/action/upgrade_test.go
+++ b/pkg/action/upgrade_test.go
@@ -775,3 +775,30 @@ func TestObjectKey(t *testing.T) {
 
 	assert.Equal(t, "apps/v1/Deployment/namespace/name", objectKey(&info))
 }
+
+func TestUpgradeRelease_WaitOptionsPassedDownstream(t *testing.T) {
+	is := assert.New(t)
+	req := require.New(t)
+
+	upAction := upgradeAction(t)
+	rel := releaseStub()
+	rel.Name = "wait-options-test"
+	rel.Info.Status = common.StatusDeployed
+	req.NoError(upAction.cfg.Releases.Create(rel))
+
+	upAction.WaitStrategy = kube.StatusWatcherStrategy
+
+	// Use WithWaitContext as a marker WaitOption that we can track
+	ctx := context.Background()
+	upAction.WaitOptions = []kube.WaitOption{kube.WithWaitContext(ctx)}
+
+	// Access the underlying FailingKubeClient to check recorded options
+	failer := upAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
+
+	vals := map[string]interface{}{}
+	_, err := upAction.Run(rel.Name, buildChart(), vals)
+	req.NoError(err)
+
+	// Verify that WaitOptions were passed to GetWaiter
+	is.NotEmpty(failer.RecordedWaitOptions, "WaitOptions should be passed to GetWaiter")
+}
diff --git a/pkg/kube/client.go b/pkg/kube/client.go
index 6e09fceac..2fa855c1e 100644
--- a/pkg/kube/client.go
+++ b/pkg/kube/client.go
@@ -87,6 +87,8 @@ type Client struct {
 	// WaitContext is an optional context to use for wait operations.
 	// If not set, a context will be created internally using the
 	// timeout provided to the wait functions.
+ // + // Deprecated: Use WithWaitContext wait option when getting a Waiter instead. WaitContext context.Context Waiter @@ -139,7 +141,11 @@ func init() { } } -func (c *Client) newStatusWatcher() (*statusWaiter, error) { +func (c *Client) newStatusWatcher(opts ...WaitOption) (*statusWaiter, error) { + var o waitOptions + for _, opt := range opts { + opt(&o) + } cfg, err := c.Factory.ToRESTConfig() if err != nil { return nil, err @@ -156,14 +162,23 @@ func (c *Client) newStatusWatcher() (*statusWaiter, error) { if err != nil { return nil, err } + waitContext := o.ctx + if waitContext == nil { + waitContext = c.WaitContext + } return &statusWaiter{ restMapper: restMapper, client: dynamicClient, - ctx: c.WaitContext, + ctx: waitContext, + readers: o.statusReaders, }, nil } -func (c *Client) GetWaiter(strategy WaitStrategy) (Waiter, error) { +func (c *Client) GetWaiter(ws WaitStrategy) (Waiter, error) { + return c.GetWaiterWithOptions(ws) +} + +func (c *Client) GetWaiterWithOptions(strategy WaitStrategy, opts ...WaitOption) (Waiter, error) { switch strategy { case LegacyStrategy: kc, err := c.Factory.KubernetesClientSet() @@ -172,9 +187,9 @@ func (c *Client) GetWaiter(strategy WaitStrategy) (Waiter, error) { } return &legacyWaiter{kubeClient: kc, ctx: c.WaitContext}, nil case StatusWatcherStrategy: - return c.newStatusWatcher() + return c.newStatusWatcher(opts...) case HookOnlyStrategy: - sw, err := c.newStatusWatcher() + sw, err := c.newStatusWatcher(opts...) if err != nil { return nil, err } @@ -187,8 +202,12 @@ func (c *Client) GetWaiter(strategy WaitStrategy) (Waiter, error) { } func (c *Client) SetWaiter(ws WaitStrategy) error { + return c.SetWaiterWithOptions(ws) +} + +func (c *Client) SetWaiterWithOptions(ws WaitStrategy, opts ...WaitOption) error { var err error - c.Waiter, err = c.GetWaiter(ws) + c.Waiter, err = c.GetWaiterWithOptions(ws, opts...) 
if err != nil { return err } diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index d442864f8..fcfd16ee7 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -28,6 +28,10 @@ import ( "testing" "time" + "github.com/fluxcd/cli-utils/pkg/kstatus/polling/engine" + "github.com/fluxcd/cli-utils/pkg/kstatus/polling/event" + "github.com/fluxcd/cli-utils/pkg/kstatus/status" + "github.com/fluxcd/cli-utils/pkg/object" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -722,7 +726,7 @@ func TestWait(t *testing.T) { }), } var err error - c.Waiter, err = c.GetWaiter(LegacyStrategy) + c.Waiter, err = c.GetWaiterWithOptions(LegacyStrategy) if err != nil { t.Fatal(err) } @@ -783,7 +787,7 @@ func TestWaitJob(t *testing.T) { }), } var err error - c.Waiter, err = c.GetWaiter(LegacyStrategy) + c.Waiter, err = c.GetWaiterWithOptions(LegacyStrategy) if err != nil { t.Fatal(err) } @@ -845,7 +849,7 @@ func TestWaitDelete(t *testing.T) { }), } var err error - c.Waiter, err = c.GetWaiter(LegacyStrategy) + c.Waiter, err = c.GetWaiterWithOptions(LegacyStrategy) if err != nil { t.Fatal(err) } @@ -1852,7 +1856,7 @@ func TestClientWaitContextCancellationLegacy(t *testing.T) { } var err error - c.Waiter, err = c.GetWaiter(LegacyStrategy) + c.Waiter, err = c.GetWaiterWithOptions(LegacyStrategy) require.NoError(t, err) resources, err := c.Build(objBody(&podList), false) @@ -1907,7 +1911,7 @@ func TestClientWaitWithJobsContextCancellationLegacy(t *testing.T) { } var err error - c.Waiter, err = c.GetWaiter(LegacyStrategy) + c.Waiter, err = c.GetWaiterWithOptions(LegacyStrategy) require.NoError(t, err) resources, err := c.Build(objBody(job), false) @@ -1968,7 +1972,7 @@ func TestClientWaitForDeleteContextCancellationLegacy(t *testing.T) { } var err error - c.Waiter, err = c.GetWaiter(LegacyStrategy) + c.Waiter, err = c.GetWaiterWithOptions(LegacyStrategy) require.NoError(t, err) resources, err := c.Build(objBody(&pod), false) @@ -2030,7 +2034,7 @@ func TestClientWaitContextNilDoesNotPanic(t *testing.T) { } var err error - c.Waiter, err = c.GetWaiter(LegacyStrategy) + c.Waiter, err = c.GetWaiterWithOptions(LegacyStrategy) require.NoError(t, err) resources, err := c.Build(objBody(&podList), false) @@ -2080,7 +2084,7 @@ func TestClientWaitContextPreCancelledLegacy(t *testing.T) { } var err error - c.Waiter, err = c.GetWaiter(LegacyStrategy) + c.Waiter, err = c.GetWaiterWithOptions(LegacyStrategy) require.NoError(t, err) resources, err := c.Build(objBody(&podList), false) @@ -2111,7 +2115,7 @@ metadata: namespace: default ` var err error - c.Waiter, err = c.GetWaiter(StatusWatcherStrategy) + c.Waiter, err = c.GetWaiterWithOptions(StatusWatcherStrategy) require.NoError(t, err) resources, err := c.Build(strings.NewReader(podManifest), false) @@ -2138,7 +2142,7 @@ metadata: namespace: default ` var err error - c.Waiter, err = c.GetWaiter(StatusWatcherStrategy) + c.Waiter, err = c.GetWaiterWithOptions(StatusWatcherStrategy) require.NoError(t, err) resources, err := c.Build(strings.NewReader(jobManifest), false) @@ -2170,7 +2174,7 @@ status: phase: Running ` var err error - c.Waiter, err = c.GetWaiter(StatusWatcherStrategy) + c.Waiter, err = c.GetWaiterWithOptions(StatusWatcherStrategy) require.NoError(t, err) resources, err := c.Build(strings.NewReader(podManifest), false) @@ -2182,3 +2186,100 @@ status: require.Error(t, err) assert.Contains(t, err.Error(), "context canceled", "expected context canceled error, got: %v", err) } + +// testStatusReader is a custom status reader 
+// for testing that returns a configurable status.
+type testStatusReader struct {
+	supportedGK schema.GroupKind
+	status      status.Status
+}
+
+func (r *testStatusReader) Supports(gk schema.GroupKind) bool {
+	return gk == r.supportedGK
+}
+
+func (r *testStatusReader) ReadStatus(_ context.Context, _ engine.ClusterReader, id object.ObjMetadata) (*event.ResourceStatus, error) {
+	return &event.ResourceStatus{
+		Identifier: id,
+		Status:     r.status,
+		Message:    "test status reader",
+	}, nil
+}
+
+func (r *testStatusReader) ReadStatusForObject(_ context.Context, _ engine.ClusterReader, u *unstructured.Unstructured) (*event.ResourceStatus, error) {
+	id := object.ObjMetadata{
+		Namespace: u.GetNamespace(),
+		Name:      u.GetName(),
+		GroupKind: u.GroupVersionKind().GroupKind(),
+	}
+	return &event.ResourceStatus{
+		Identifier: id,
+		Status:     r.status,
+		Message:    "test status reader",
+	}, nil
+}
+
+func TestClientStatusReadersPassedToStatusWaiter(t *testing.T) {
+	// This test verifies that status readers supplied via WithKStatusReaders are
+	// correctly passed through to the statusWaiter when using the StatusWatcherStrategy.
+	// We use a custom status reader that immediately returns CurrentStatus for pods,
+	// which allows a pod without a Ready condition to pass the wait.
+	podManifest := `
+apiVersion: v1
+kind: Pod
+metadata:
+  name: test-pod
+  namespace: default
+`
+
+	c := newTestClient(t)
+	statusReaders := []engine.StatusReader{
+		&testStatusReader{
+			supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(),
+			status:      status.CurrentStatus,
+		},
+	}
+
+	var err error
+	c.Waiter, err = c.GetWaiterWithOptions(StatusWatcherStrategy, WithKStatusReaders(statusReaders...))
+	require.NoError(t, err)
+
+	resources, err := c.Build(strings.NewReader(podManifest), false)
+	require.NoError(t, err)
+
+	// The pod has no Ready condition, but our custom reader returns CurrentStatus,
+	// so the wait should succeed immediately without timeout.
+	err = c.Wait(resources, time.Second*3)
+	require.NoError(t, err)
+}
+
+func TestClientStatusReadersWithWaitWithJobs(t *testing.T) {
+	// This test verifies that status readers supplied via WithKStatusReaders are
+	// correctly passed through to the statusWaiter when using WaitWithJobs.
+	jobManifest := `
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: test-job
+  namespace: default
+`
+
+	c := newTestClient(t)
+	statusReaders := []engine.StatusReader{
+		&testStatusReader{
+			supportedGK: schema.GroupKind{Group: "batch", Kind: "Job"},
+			status:      status.CurrentStatus,
+		},
+	}
+
+	var err error
+	c.Waiter, err = c.GetWaiterWithOptions(StatusWatcherStrategy, WithKStatusReaders(statusReaders...))
+	require.NoError(t, err)
+
+	resources, err := c.Build(strings.NewReader(jobManifest), false)
+	require.NoError(t, err)
+
+	// The job has no Complete condition, but our custom reader returns CurrentStatus,
+	// so the wait should succeed immediately without timeout.
+ err = c.WaitWithJobs(resources, time.Second*3) + require.NoError(t, err) +} diff --git a/pkg/kube/fake/failing_kube_client.go b/pkg/kube/fake/failing_kube_client.go index 31d0082cc..0f7787f79 100644 --- a/pkg/kube/fake/failing_kube_client.go +++ b/pkg/kube/fake/failing_kube_client.go @@ -47,6 +47,8 @@ type FailingKubeClient struct { WaitForDeleteError error WatchUntilReadyError error WaitDuration time.Duration + // RecordedWaitOptions stores the WaitOptions passed to GetWaiter for testing + RecordedWaitOptions []kube.WaitOption } var _ kube.Interface = &FailingKubeClient{} @@ -153,7 +155,13 @@ func (f *FailingKubeClient) BuildTable(r io.Reader, _ bool) (kube.ResourceList, } func (f *FailingKubeClient) GetWaiter(ws kube.WaitStrategy) (kube.Waiter, error) { - waiter, _ := f.PrintingKubeClient.GetWaiter(ws) + return f.GetWaiterWithOptions(ws) +} + +func (f *FailingKubeClient) GetWaiterWithOptions(ws kube.WaitStrategy, opts ...kube.WaitOption) (kube.Waiter, error) { + // Record the WaitOptions for testing + f.RecordedWaitOptions = append(f.RecordedWaitOptions, opts...) + waiter, _ := f.PrintingKubeClient.GetWaiterWithOptions(ws, opts...) printingKubeWaiter, _ := waiter.(*PrintingKubeWaiter) return &FailingKubeWaiter{ PrintingKubeWaiter: printingKubeWaiter, diff --git a/pkg/kube/fake/printer.go b/pkg/kube/fake/printer.go index a7aad1dac..e3fa11576 100644 --- a/pkg/kube/fake/printer.go +++ b/pkg/kube/fake/printer.go @@ -148,7 +148,11 @@ func (p *PrintingKubeClient) DeleteWithPropagationPolicy(resources kube.Resource return &kube.Result{Deleted: resources}, nil } -func (p *PrintingKubeClient) GetWaiter(_ kube.WaitStrategy) (kube.Waiter, error) { +func (p *PrintingKubeClient) GetWaiter(ws kube.WaitStrategy) (kube.Waiter, error) { + return p.GetWaiterWithOptions(ws) +} + +func (p *PrintingKubeClient) GetWaiterWithOptions(_ kube.WaitStrategy, _ ...kube.WaitOption) (kube.Waiter, error) { return &PrintingKubeWaiter{Out: p.Out, LogOutput: p.LogOutput}, nil } diff --git a/pkg/kube/interface.go b/pkg/kube/interface.go index cc934ae1e..63c784751 100644 --- a/pkg/kube/interface.go +++ b/pkg/kube/interface.go @@ -56,7 +56,7 @@ type Interface interface { // IsReachable checks whether the client is able to connect to the cluster. IsReachable() error - // Get Waiter gets the Kube.Waiter + // GetWaiter gets the Kube.Waiter. GetWaiter(ws WaitStrategy) (Waiter, error) // GetPodList lists all pods that match the specified listOptions @@ -99,3 +99,14 @@ type Waiter interface { // error. WatchUntilReady(resources ResourceList, timeout time.Duration) error } + +// InterfaceWaitOptions defines an interface that extends Interface with +// methods that accept wait options. +// +// TODO Helm 5: Remove InterfaceWaitOptions and integrate its method(s) into the Interface. +type InterfaceWaitOptions interface { + // GetWaiter gets the Kube.Waiter with options. + GetWaiterWithOptions(ws WaitStrategy, opts ...WaitOption) (Waiter, error) +} + +var _ InterfaceWaitOptions = (*Client)(nil) diff --git a/pkg/kube/options.go b/pkg/kube/options.go new file mode 100644 index 000000000..49c6229ba --- /dev/null +++ b/pkg/kube/options.go @@ -0,0 +1,45 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kube
+
+import (
+	"context"
+
+	"github.com/fluxcd/cli-utils/pkg/kstatus/polling/engine"
+)
+
+// WaitOption is a function that configures an option for waiting on resources.
+type WaitOption func(*waitOptions)
+
+// WithWaitContext sets the context for waiting on resources.
+func WithWaitContext(ctx context.Context) WaitOption {
+	return func(wo *waitOptions) {
+		wo.ctx = ctx
+	}
+}
+
+// WithKStatusReaders sets the status readers to be used while waiting on resources.
+func WithKStatusReaders(readers ...engine.StatusReader) WaitOption {
+	return func(wo *waitOptions) {
+		wo.statusReaders = readers
+	}
+}
+
+type waitOptions struct {
+	ctx           context.Context
+	statusReaders []engine.StatusReader
+}
diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go
index a518f0c04..5acc6f19d 100644
--- a/pkg/kube/statuswait.go
+++ b/pkg/kube/statuswait.go
@@ -45,6 +45,7 @@ type statusWaiter struct {
 	client     dynamic.Interface
 	restMapper meta.RESTMapper
 	ctx        context.Context
+	readers    []engine.StatusReader
 }
 
 // DefaultStatusWatcherTimeout is the timeout used by the status waiter when a
@@ -71,15 +72,13 @@ func (w *statusWaiter) WatchUntilReady(resourceList ResourceList, timeout time.D
 
 	sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
 	jobSR := helmStatusReaders.NewCustomJobStatusReader(w.restMapper)
 	podSR := helmStatusReaders.NewCustomPodStatusReader(w.restMapper)
-	// We don't want to wait on any other resources as watchUntilReady is only for Helm hooks
+	// We don't want to wait on any other resources as watchUntilReady is only for Helm hooks.
+	// If custom readers are defined, they can be used here too, since Helm hooks may be any resource kind.
+	// We put them in front since the DelegatingStatusReader uses the first reader that matches; copying first keeps the caller's slice unmutated.
 	genericSR := statusreaders.NewGenericStatusReader(w.restMapper, alwaysReady)
 	sr := &statusreaders.DelegatingStatusReader{
-		StatusReaders: []engine.StatusReader{
-			jobSR,
-			podSR,
-			genericSR,
-		},
+		StatusReaders: append(append([]engine.StatusReader(nil), w.readers...), jobSR, podSR, genericSR),
 	}
 	sw.StatusReader = sr
 	return w.wait(ctx, resourceList, sw)
@@ -93,6 +92,7 @@ func (w *statusWaiter) Wait(resourceList ResourceList, timeout time.Duration) er
 	defer cancel()
 	slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout)
 	sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
+	sw.StatusReader = statusreaders.NewStatusReader(w.restMapper, w.readers...)
 	return w.wait(ctx, resourceList, sw)
 }
 
@@ -105,7 +105,8 @@ func (w *statusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Dura
 	slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout)
 	sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
 	newCustomJobStatusReader := helmStatusReaders.NewCustomJobStatusReader(w.restMapper)
-	customSR := statusreaders.NewStatusReader(w.restMapper, newCustomJobStatusReader)
+	readers := append(w.readers, newCustomJobStatusReader)
+	customSR := statusreaders.NewStatusReader(w.restMapper, readers...)
sw.StatusReader = customSR return w.wait(ctx, resourceList, sw) } diff --git a/pkg/kube/statuswait_test.go b/pkg/kube/statuswait_test.go index a8ff4e0e6..a4e7ff62c 100644 --- a/pkg/kube/statuswait_test.go +++ b/pkg/kube/statuswait_test.go @@ -14,15 +14,21 @@ See the License for the specific language governing permissions and limitations under the License. */ -package kube // import "helm.sh/helm/v3/pkg/kube" +package kube // import "helm.sh/helm/v4/pkg/kube" import ( + "context" "errors" "fmt" "strings" + "sync/atomic" "testing" "time" + "github.com/fluxcd/cli-utils/pkg/kstatus/polling/engine" + "github.com/fluxcd/cli-utils/pkg/kstatus/polling/event" + "github.com/fluxcd/cli-utils/pkg/kstatus/status" + "github.com/fluxcd/cli-utils/pkg/object" "github.com/fluxcd/cli-utils/pkg/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -933,3 +939,272 @@ func TestStatusWaitMixedResources(t *testing.T) { }) } } + +// mockStatusReader is a custom status reader for testing that tracks when it's used +// and returns a configurable status for resources it supports. +type mockStatusReader struct { + supportedGK schema.GroupKind + status status.Status + callCount atomic.Int32 +} + +func (m *mockStatusReader) Supports(gk schema.GroupKind) bool { + return gk == m.supportedGK +} + +func (m *mockStatusReader) ReadStatus(_ context.Context, _ engine.ClusterReader, id object.ObjMetadata) (*event.ResourceStatus, error) { + m.callCount.Add(1) + return &event.ResourceStatus{ + Identifier: id, + Status: m.status, + Message: "mock status reader", + }, nil +} + +func (m *mockStatusReader) ReadStatusForObject(_ context.Context, _ engine.ClusterReader, u *unstructured.Unstructured) (*event.ResourceStatus, error) { + m.callCount.Add(1) + id := object.ObjMetadata{ + Namespace: u.GetNamespace(), + Name: u.GetName(), + GroupKind: u.GroupVersionKind().GroupKind(), + } + return &event.ResourceStatus{ + Identifier: id, + Status: m.status, + Message: "mock status reader", + }, nil +} + +func TestStatusWaitWithCustomReaders(t *testing.T) { + t.Parallel() + tests := []struct { + name string + objManifests []string + customReader *mockStatusReader + expectErrs []error + }{ + { + name: "custom reader makes pod immediately current", + objManifests: []string{podNoStatusManifest}, + customReader: &mockStatusReader{ + supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(), + status: status.CurrentStatus, + }, + expectErrs: nil, + }, + { + name: "custom reader returns in-progress status", + objManifests: []string{podCurrentManifest}, + customReader: &mockStatusReader{ + supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(), + status: status.InProgressStatus, + }, + expectErrs: []error{errors.New("resource not ready, name: current-pod, kind: Pod, status: InProgress"), errors.New("context deadline exceeded")}, + }, + { + name: "custom reader for different resource type is not used", + objManifests: []string{podCurrentManifest}, + customReader: &mockStatusReader{ + supportedGK: batchv1.SchemeGroupVersion.WithKind("Job").GroupKind(), + status: status.InProgressStatus, + }, + expectErrs: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + batchv1.SchemeGroupVersion.WithKind("Job"), + ) + statusWaiter := statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + 
readers: []engine.StatusReader{tt.customReader}, + } + objs := getRuntimeObjFromManifests(t, tt.objManifests) + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) + assert.NoError(t, err) + } + resourceList := getResourceListFromRuntimeObjs(t, c, objs) + err := statusWaiter.Wait(resourceList, time.Second*3) + if tt.expectErrs != nil { + assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) + return + } + assert.NoError(t, err) + }) + } +} + +func TestStatusWaitWithJobsAndCustomReaders(t *testing.T) { + t.Parallel() + tests := []struct { + name string + objManifests []string + customReader *mockStatusReader + expectErrs []error + }{ + { + name: "custom reader makes job immediately current", + objManifests: []string{jobNoStatusManifest}, + customReader: &mockStatusReader{ + supportedGK: batchv1.SchemeGroupVersion.WithKind("Job").GroupKind(), + status: status.CurrentStatus, + }, + expectErrs: nil, + }, + { + name: "custom reader for pod works with WaitWithJobs", + objManifests: []string{podNoStatusManifest}, + customReader: &mockStatusReader{ + supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(), + status: status.CurrentStatus, + }, + expectErrs: nil, + }, + { + name: "built-in job reader is still appended after custom readers", + objManifests: []string{jobCompleteManifest}, + customReader: &mockStatusReader{ + supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(), + status: status.CurrentStatus, + }, + expectErrs: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + batchv1.SchemeGroupVersion.WithKind("Job"), + ) + statusWaiter := statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + readers: []engine.StatusReader{tt.customReader}, + } + objs := getRuntimeObjFromManifests(t, tt.objManifests) + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) + assert.NoError(t, err) + } + resourceList := getResourceListFromRuntimeObjs(t, c, objs) + err := statusWaiter.WaitWithJobs(resourceList, time.Second*3) + if tt.expectErrs != nil { + assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) + return + } + assert.NoError(t, err) + }) + } +} + +func TestWatchUntilReadyWithCustomReaders(t *testing.T) { + t.Parallel() + tests := []struct { + name string + objManifests []string + customReader *mockStatusReader + expectErrs []error + }{ + { + name: "custom reader makes job immediately current for hooks", + objManifests: []string{jobNoStatusManifest}, + customReader: &mockStatusReader{ + supportedGK: batchv1.SchemeGroupVersion.WithKind("Job").GroupKind(), + status: status.CurrentStatus, + }, + expectErrs: nil, + }, + { + name: "custom reader makes pod immediately current for hooks", + objManifests: []string{podCurrentManifest}, + customReader: &mockStatusReader{ + supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(), + status: status.CurrentStatus, + }, + expectErrs: nil, + }, + { + name: "custom reader takes precedence over built-in pod reader", + objManifests: []string{podCompleteManifest}, + customReader: &mockStatusReader{ + supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(), + status: 
status.InProgressStatus, + }, + expectErrs: []error{errors.New("resource not ready, name: good-pod, kind: Pod, status: InProgress"), errors.New("context deadline exceeded")}, + }, + { + name: "custom reader takes precedence over built-in job reader", + objManifests: []string{jobCompleteManifest}, + customReader: &mockStatusReader{ + supportedGK: batchv1.SchemeGroupVersion.WithKind("Job").GroupKind(), + status: status.InProgressStatus, + }, + expectErrs: []error{errors.New("resource not ready, name: test, kind: Job, status: InProgress"), errors.New("context deadline exceeded")}, + }, + { + name: "custom reader for different resource type does not affect pods", + objManifests: []string{podCompleteManifest}, + customReader: &mockStatusReader{ + supportedGK: batchv1.SchemeGroupVersion.WithKind("Job").GroupKind(), + status: status.InProgressStatus, + }, + expectErrs: nil, + }, + { + name: "built-in readers still work when custom reader does not match", + objManifests: []string{jobCompleteManifest}, + customReader: &mockStatusReader{ + supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(), + status: status.InProgressStatus, + }, + expectErrs: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + c := newTestClient(t) + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme) + fakeMapper := testutil.NewFakeRESTMapper( + v1.SchemeGroupVersion.WithKind("Pod"), + batchv1.SchemeGroupVersion.WithKind("Job"), + ) + statusWaiter := statusWaiter{ + client: fakeClient, + restMapper: fakeMapper, + readers: []engine.StatusReader{tt.customReader}, + } + objs := getRuntimeObjFromManifests(t, tt.objManifests) + for _, obj := range objs { + u := obj.(*unstructured.Unstructured) + gvr := getGVR(t, fakeMapper, u) + err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace()) + assert.NoError(t, err) + } + resourceList := getResourceListFromRuntimeObjs(t, c, objs) + err := statusWaiter.WatchUntilReady(resourceList, time.Second*3) + if tt.expectErrs != nil { + assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error()) + return + } + assert.NoError(t, err) + }) + } +} From f46f1ce55d690a6c6f7e9b602757f9cf844492cb Mon Sep 17 00:00:00 2001 From: Evans Mungai Date: Mon, 12 Jan 2026 18:32:31 +0000 Subject: [PATCH 17/18] Update pkg/kube/statuswait.go Signed-off-by: Evans Mungai --- pkg/kube/statuswait.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/kube/statuswait.go b/pkg/kube/statuswait.go index 5acc6f19d..75c18de81 100644 --- a/pkg/kube/statuswait.go +++ b/pkg/kube/statuswait.go @@ -105,7 +105,8 @@ func (w *statusWaiter) WaitWithJobs(resourceList ResourceList, timeout time.Dura slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout) sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper) newCustomJobStatusReader := helmStatusReaders.NewCustomJobStatusReader(w.restMapper) - readers := append(w.readers, newCustomJobStatusReader) + readers := append([]engine.StatusReader(nil), w.readers...) + readers = append(readers, newCustomJobStatusReader) customSR := statusreaders.NewStatusReader(w.restMapper, readers...) 
sw.StatusReader = customSR return w.wait(ctx, resourceList, sw) From da1d68adea91ab13b308c059c39381d48045a73a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 13 Jan 2026 02:06:52 +0000 Subject: [PATCH 18/18] chore(deps): bump golang.org/x/crypto from 0.46.0 to 0.47.0 Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.46.0 to 0.47.0. - [Commits](https://github.com/golang/crypto/compare/v0.46.0...v0.47.0) --- updated-dependencies: - dependency-name: golang.org/x/crypto dependency-version: 0.47.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 81162118a..6e2d9c15d 100644 --- a/go.mod +++ b/go.mod @@ -35,7 +35,7 @@ require ( github.com/stretchr/testify v1.11.1 github.com/tetratelabs/wazero v1.11.0 go.yaml.in/yaml/v3 v3.0.4 - golang.org/x/crypto v0.46.0 + golang.org/x/crypto v0.47.0 golang.org/x/term v0.39.0 golang.org/x/text v0.33.0 gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index abeb76478..b1e843f1b 100644 --- a/go.sum +++ b/go.sum @@ -376,8 +376,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g= -golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= -golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= +golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8= +golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
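A note on the defensive copy in the patch above: append may reuse the backing array of the slice it is given, so appending the built-in job reader directly to the caller-supplied w.readers could overwrite an element the caller still holds. A standalone illustration of the hazard in plain Go (unrelated to the Helm APIs):

package main

import "fmt"

func main() {
	caller := make([]int, 1, 4) // caller's slice, with spare capacity
	caller[0] = 1

	aliased := append(caller, 2) // reuses caller's backing array
	copied := append([]int(nil), caller...)
	copied = append(copied, 3) // fresh backing array: safe

	aliased[0] = 99                   // writes through to caller's array
	fmt.Println(caller[0], copied[0]) // prints "99 1"
}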