From aa9e4bb42dfc99b4d5144bbcf6f2cf9b2395ba2b Mon Sep 17 00:00:00 2001
From: Gerard Nguyen
Date: Sat, 15 Mar 2025 09:11:45 +1100
Subject: [PATCH] Output hook logs by policy and wait for hook deletion

Handle the "helm.sh/hook-output-log-policy" annotation so that container
logs for Job and Pod hooks can be copied to the client on hook success
and/or failure, and make hook deletion by policy wait for the hook
resources to be deleted before continuing.

Signed-off-by: Gerard Nguyen
---
 pkg/action/hooks.go      | 116 +++++++++++++++++++----
 pkg/action/hooks_test.go | 197 +++++++++++++++++++++++++++++++++++++--
 2 files changed, 288 insertions(+), 25 deletions(-)

diff --git a/pkg/action/hooks.go b/pkg/action/hooks.go
index 4fbe28bbe..d9c0eb085 100644
--- a/pkg/action/hooks.go
+++ b/pkg/action/hooks.go
@@ -17,13 +17,21 @@ package action
 
 import (
 	"bytes"
+	"fmt"
+	"log"
+	"slices"
 	"sort"
 	"time"
 
+	"helm.sh/helm/v4/pkg/kube"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
 	"github.com/pkg/errors"
+	"gopkg.in/yaml.v3"
 
-	"helm.sh/helm/v3/pkg/release"
-	helmtime "helm.sh/helm/v3/pkg/time"
+	release "helm.sh/helm/v4/pkg/release/v1"
+	helmtime "helm.sh/helm/v4/pkg/time"
 )
 
 // execHook executes all of the hooks for the given hook event.
@@ -43,7 +51,7 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent,
 
 	for i, h := range executingHooks {
 		// Set default delete policy to before-hook-creation
-		if h.DeletePolicies == nil || len(h.DeletePolicies) == 0 {
+		if len(h.DeletePolicies) == 0 {
 			// TODO(jlegrone): Only apply before-hook-creation delete policy to run to completion
 			// resources. For all other resource types update in place if a
 			// resource with the same name already exists and is owned by the
@@ -51,7 +59,7 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent,
 			h.DeletePolicies = []release.HookDeletePolicy{release.HookBeforeHookCreation}
 		}
 
-		if err := cfg.deleteHookByPolicy(h, release.HookBeforeHookCreation); err != nil {
+		if err := cfg.deleteHookByPolicy(h, release.HookBeforeHookCreation, timeout); err != nil {
 			return err
 		}
 
@@ -86,15 +94,21 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent,
 		// Mark hook as succeeded or failed
 		if err != nil {
 			h.LastRun.Phase = release.HookPhaseFailed
+			// If a hook is failed, check the annotation of the hook to determine if we should copy the logs client side
+			if errOutputting := cfg.outputLogsByPolicy(h, rl.Namespace, release.HookOutputOnFailed); errOutputting != nil {
+				// We log the error here as we want to propagate the hook failure upwards to the release object.
+				log.Printf("error outputting logs for hook failure: %v", errOutputting)
+			}
 			// If a hook is failed, check the annotation of the hook to determine whether the hook should be deleted
 			// under failed condition. If so, then clear the corresponding resource object in the hook
-			if err := cfg.deleteHookByPolicy(h, release.HookFailed); err != nil {
-				return err
+			if errDeleting := cfg.deleteHookByPolicy(h, release.HookFailed, timeout); errDeleting != nil {
+				// We log the error here as we want to propagate the hook failure upwards to the release object.
+				log.Printf("error deleting the hook resource on hook failure: %v", errDeleting)
 			}
 
-			// If a hook is failed, check the annotation of the previous successful hooks to determine whether the hook
+			// If a hook is failed, check the annotation of the previous successful hooks to determine whether the hooks
 			// should be deleted under succeeded condition.
-			if err := cfg.deleteHooksByPolicy(executingHooks[0:i], release.HookSucceeded); err != nil {
+			if err := cfg.deleteHooksByPolicy(executingHooks[0:i], release.HookSucceeded, timeout); err != nil {
 				return err
 			}
 
@@ -104,9 +118,16 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent,
 	}
 
 	// If all hooks are successful, check the annotation of each hook to determine whether the hook should be deleted
-	// under succeeded condition. If so, then clear the corresponding resource object in each hook
-	if err := cfg.deleteHooksByPolicy(executingHooks, release.HookSucceeded); err != nil {
-		return err
+	// or output should be logged under succeeded condition. If so, then clear the corresponding resource object in each hook
+	for i := len(executingHooks) - 1; i >= 0; i-- {
+		h := executingHooks[i]
+		if err := cfg.outputLogsByPolicy(h, rl.Namespace, release.HookOutputOnSucceeded); err != nil {
+			// We log here as we still want to attempt hook resource deletion even if output logging fails.
+			log.Printf("error outputting logs for hook: %v", err)
+		}
+		if err := cfg.deleteHookByPolicy(h, release.HookSucceeded, timeout); err != nil {
+			return err
+		}
 	}
 	return nil
 }
@@ -125,7 +146,7 @@ func (x hookByWeight) Less(i, j int) bool {
 }
 
 // deleteHookByPolicy deletes a hook if the hook policy instructs it to
-func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.HookDeletePolicy) error {
+func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.HookDeletePolicy, timeout time.Duration) error {
 	// Never delete CustomResourceDefinitions; this could cause lots of
 	// cascading garbage collection.
 	if h.Kind == "CustomResourceDefinition" {
@@ -140,7 +161,25 @@ func (cfg *Configuration) deleteHookByPolicy(h *release.Hoo
 		if len(errs) > 0 {
 			return errors.New(joinErrors(errs))
 		}
+
+		// wait for resources until they are deleted to avoid conflicts
+		if kubeClient, ok := cfg.KubeClient.(kube.InterfaceExt); ok {
+			if err := kubeClient.WaitForDelete(resources, timeout); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// deleteHooksByPolicy deletes all hooks if the hook policy instructs it to
+func (cfg *Configuration) deleteHooksByPolicy(hooks []*release.Hook, policy release.HookDeletePolicy, timeout time.Duration) error {
+	for _, h := range hooks {
+		if err := cfg.deleteHookByPolicy(h, policy, timeout); err != nil {
+			return err
+		}
 	}
 
 	return nil
@@ -155,13 +194,56 @@ func hookHasDeletePolicy(h *release.Hook, policy release.HookDeletePolicy) bool
 	return false
 }
 
-// deleteHooksByPolicy deletes all hooks if the hook policy instructs it to
-func (cfg *Configuration) deleteHooksByPolicy(hooks []*release.Hook, policy release.HookDeletePolicy) error {
-	for _, h := range hooks {
-		if err := cfg.deleteHookByPolicy(h, policy); err != nil {
+// outputLogsByPolicy outputs a pod's logs if the hook policy instructs it to
+func (cfg *Configuration) outputLogsByPolicy(h *release.Hook, releaseNamespace string, policy release.HookOutputLogPolicy) error {
+	if !hookHasOutputLogPolicy(h, policy) {
+		return nil
+	}
+	namespace, err := cfg.deriveNamespace(h, releaseNamespace)
+	if err != nil {
+		return err
+	}
+	switch h.Kind {
+	case "Job":
+		return cfg.outputContainerLogsForListOptions(namespace, metav1.ListOptions{LabelSelector: fmt.Sprintf("job-name=%s", h.Name)})
+	case "Pod":
+		return cfg.outputContainerLogsForListOptions(namespace, metav1.ListOptions{FieldSelector: fmt.Sprintf("metadata.name=%s", h.Name)})
+	default:
+		return nil
+	}
+}
+
+func (cfg *Configuration) outputContainerLogsForListOptions(namespace string, listOptions metav1.ListOptions) error {
+	// TODO Helm 4: Remove this check when GetPodList and OutputContainerLogsForPodList are moved from InterfaceLogs to Interface
+	if kubeClient, ok := cfg.KubeClient.(kube.InterfaceLogs); ok {
+		podList, err := kubeClient.GetPodList(namespace, listOptions)
+		if err != nil {
 			return err
 		}
+		err = kubeClient.OutputContainerLogsForPodList(podList, namespace, cfg.HookOutputFunc)
+		return err
 	}
-
 	return nil
 }
+
+func (cfg *Configuration) deriveNamespace(h *release.Hook, namespace string) (string, error) {
+	tmp := struct {
+		Metadata struct {
+			Namespace string
+		}
+	}{}
+	err := yaml.Unmarshal([]byte(h.Manifest), &tmp)
+	if err != nil {
+		return "", errors.Wrapf(err, "unable to parse metadata.namespace from kubernetes manifest for output logs hook %s", h.Path)
+	}
+	if tmp.Metadata.Namespace == "" {
+		return namespace, nil
+	}
+	return tmp.Metadata.Namespace, nil
+}
+
+// hookHasOutputLogPolicy determines whether the defined hook output log policy matches the hook output log policies
+// supported by helm.
+func hookHasOutputLogPolicy(h *release.Hook, policy release.HookOutputLogPolicy) bool {
+	return slices.Contains(h.OutputLogPolicies, policy)
+}
diff --git a/pkg/action/hooks_test.go b/pkg/action/hooks_test.go
index 3ef1c17cb..68379add8 100644
--- a/pkg/action/hooks_test.go
+++ b/pkg/action/hooks_test.go
@@ -17,24 +17,205 @@ limitations under the License.
 package action
 
 import (
+	"bytes"
+	"fmt"
 	"io"
-	"io/ioutil"
 	"reflect"
 	"testing"
 	"time"
 
+	"github.com/stretchr/testify/assert"
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/util/yaml"
 	"k8s.io/cli-runtime/pkg/resource"
 
-	"helm.sh/helm/v3/pkg/chartutil"
-	"helm.sh/helm/v3/pkg/kube"
-	kubefake "helm.sh/helm/v3/pkg/kube/fake"
-	"helm.sh/helm/v3/pkg/release"
-	"helm.sh/helm/v3/pkg/storage"
-	"helm.sh/helm/v3/pkg/storage/driver"
+	chart "helm.sh/helm/v4/pkg/chart/v2"
+	chartutil "helm.sh/helm/v4/pkg/chart/v2/util"
+	"helm.sh/helm/v4/pkg/kube"
+	kubefake "helm.sh/helm/v4/pkg/kube/fake"
+	release "helm.sh/helm/v4/pkg/release/v1"
+	"helm.sh/helm/v4/pkg/storage"
+	"helm.sh/helm/v4/pkg/storage/driver"
 )
 
+func podManifestWithOutputLogs(hookDefinitions []release.HookOutputLogPolicy) string {
+	hookDefinitionString := convertHooksToCommaSeparated(hookDefinitions)
+	return fmt.Sprintf(`kind: Pod
+metadata:
+  name: finding-sharky,
+  annotations:
+    "helm.sh/hook": pre-install
+    "helm.sh/hook-output-log-policy": %s
+spec:
+  containers:
+  - name: sharky-test
+    image: fake-image
+    cmd: fake-command`, hookDefinitionString)
+}
+
+func podManifestWithOutputLogWithNamespace(hookDefinitions []release.HookOutputLogPolicy) string {
+	hookDefinitionString := convertHooksToCommaSeparated(hookDefinitions)
+	return fmt.Sprintf(`kind: Pod
+metadata:
+  name: finding-george
+  namespace: sneaky-namespace
+  annotations:
+    "helm.sh/hook": pre-install
+    "helm.sh/hook-output-log-policy": %s
+spec:
+  containers:
+  - name: george-test
+    image: fake-image
+    cmd: fake-command`, hookDefinitionString)
+}
+
+func jobManifestWithOutputLog(hookDefinitions []release.HookOutputLogPolicy) string {
+	hookDefinitionString := convertHooksToCommaSeparated(hookDefinitions)
+	return fmt.Sprintf(`kind: Job
+apiVersion: batch/v1
+metadata:
+  name: losing-religion
+  annotations:
+    "helm.sh/hook": pre-install
+    "helm.sh/hook-output-log-policy": %s
+spec:
+  completions: 1
+  parallelism: 1
+  activeDeadlineSeconds: 30
+  template:
+    spec:
+      containers:
+      - name: religion-container
+        image: religion-image
+        cmd: religion-command`, hookDefinitionString)
+}
+
+func jobManifestWithOutputLogWithNamespace(hookDefinitions []release.HookOutputLogPolicy) string {
+	hookDefinitionString := convertHooksToCommaSeparated(hookDefinitions)
+	return fmt.Sprintf(`kind: Job
+apiVersion: batch/v1
+metadata:
+  name: losing-religion
+  namespace: rem-namespace
+  annotations:
+    "helm.sh/hook": pre-install
+    "helm.sh/hook-output-log-policy": %s
+spec:
+  completions: 1
+  parallelism: 1
+  activeDeadlineSeconds: 30
+  template:
+    spec:
+      containers:
+      - name: religion-container
+        image: religion-image
+        cmd: religion-command`, hookDefinitionString)
+}
+
+func convertHooksToCommaSeparated(hookDefinitions []release.HookOutputLogPolicy) string {
+	var commaSeparated string
+	for i, policy := range hookDefinitions {
+		if i+1 == len(hookDefinitions) {
+			commaSeparated += policy.String()
+		} else {
+			commaSeparated += policy.String() + ","
+		}
+	}
+	return commaSeparated
+}
+
+func TestInstallRelease_HookOutputLogsOnFailure(t *testing.T) {
+	// Should output on failure with expected namespace if hook-failed is set
+	runInstallForHooksWithFailure(t, podManifestWithOutputLogs([]release.HookOutputLogPolicy{release.HookOutputOnFailed}), "spaced", true)
+	runInstallForHooksWithFailure(t, podManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnFailed}), "sneaky-namespace", true)
+	runInstallForHooksWithFailure(t, jobManifestWithOutputLog([]release.HookOutputLogPolicy{release.HookOutputOnFailed}), "spaced", true)
+	runInstallForHooksWithFailure(t, jobManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnFailed}), "rem-namespace", true)
+
+	// Should not output on failure with expected namespace if hook-succeeded is set
+	runInstallForHooksWithFailure(t, podManifestWithOutputLogs([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded}), "", false)
+	runInstallForHooksWithFailure(t, podManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded}), "", false)
+	runInstallForHooksWithFailure(t, jobManifestWithOutputLog([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded}), "", false)
+	runInstallForHooksWithFailure(t, jobManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded}), "", false)
+}
+
+func TestInstallRelease_HookOutputLogsOnSuccess(t *testing.T) {
+	// Should output on success with expected namespace if hook-succeeded is set
+	runInstallForHooksWithSuccess(t, podManifestWithOutputLogs([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded}), "spaced", true)
+	runInstallForHooksWithSuccess(t, podManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded}), "sneaky-namespace", true)
+	runInstallForHooksWithSuccess(t, jobManifestWithOutputLog([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded}), "spaced", true)
+	runInstallForHooksWithSuccess(t, jobManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded}), "rem-namespace", true)
+
+	// Should not output on success if hook-failed is set
+	runInstallForHooksWithSuccess(t, podManifestWithOutputLogs([]release.HookOutputLogPolicy{release.HookOutputOnFailed}), "", false)
+	runInstallForHooksWithSuccess(t, podManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnFailed}), "", false)
+	runInstallForHooksWithSuccess(t, jobManifestWithOutputLog([]release.HookOutputLogPolicy{release.HookOutputOnFailed}), "", false)
+	runInstallForHooksWithSuccess(t, jobManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnFailed}), "", false)
+}
+
+func TestInstallRelease_HooksOutputLogsOnSuccessAndFailure(t *testing.T) {
+	// Should output on success with expected namespace if hook-succeeded and hook-failed are set
+	runInstallForHooksWithSuccess(t, podManifestWithOutputLogs([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded, release.HookOutputOnFailed}), "spaced", true)
+	runInstallForHooksWithSuccess(t, podManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded, release.HookOutputOnFailed}), "sneaky-namespace", true)
+	runInstallForHooksWithSuccess(t, jobManifestWithOutputLog([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded, release.HookOutputOnFailed}), "spaced", true)
+	runInstallForHooksWithSuccess(t, jobManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded, release.HookOutputOnFailed}), "rem-namespace", true)
+
+	// Should output on failure if hook-succeeded and hook-failed are set
+	runInstallForHooksWithFailure(t, podManifestWithOutputLogs([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded, release.HookOutputOnFailed}), "spaced", true)
+	runInstallForHooksWithFailure(t, podManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded, release.HookOutputOnFailed}), "sneaky-namespace", true)
+	runInstallForHooksWithFailure(t, jobManifestWithOutputLog([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded, release.HookOutputOnFailed}), "spaced", true)
+	runInstallForHooksWithFailure(t, jobManifestWithOutputLogWithNamespace([]release.HookOutputLogPolicy{release.HookOutputOnSucceeded, release.HookOutputOnFailed}), "rem-namespace", true)
+}
+
+func runInstallForHooksWithSuccess(t *testing.T, manifest, expectedNamespace string, shouldOutput bool) {
+	var expectedOutput string
+	if shouldOutput {
+		expectedOutput = fmt.Sprintf("attempted to output logs for namespace: %s", expectedNamespace)
+	}
+	is := assert.New(t)
+	instAction := installAction(t)
+	instAction.ReleaseName = "failed-hooks"
+	outBuffer := &bytes.Buffer{}
+	instAction.cfg.KubeClient = &kubefake.PrintingKubeClient{Out: io.Discard, LogOutput: outBuffer}
+
+	templates := []*chart.File{
+		{Name: "templates/hello", Data: []byte("hello: world")},
+		{Name: "templates/hooks", Data: []byte(manifest)},
+	}
+	vals := map[string]interface{}{}
+
+	res, err := instAction.Run(buildChartWithTemplates(templates), vals)
+	is.NoError(err)
+	is.Equal(expectedOutput, outBuffer.String())
+	is.Equal(release.StatusDeployed, res.Info.Status)
+}
+
+func runInstallForHooksWithFailure(t *testing.T, manifest, expectedNamespace string, shouldOutput bool) {
+	var expectedOutput string
+	if shouldOutput {
+		expectedOutput = fmt.Sprintf("attempted to output logs for namespace: %s", expectedNamespace)
+	}
+	is := assert.New(t)
+	instAction := installAction(t)
+	instAction.ReleaseName = "failed-hooks"
+	failingClient := instAction.cfg.KubeClient.(*kubefake.FailingKubeClient)
+	failingClient.WatchUntilReadyError = fmt.Errorf("failed watch")
+	instAction.cfg.KubeClient = failingClient
+	outBuffer := &bytes.Buffer{}
+	failingClient.PrintingKubeClient = kubefake.PrintingKubeClient{Out: io.Discard, LogOutput: outBuffer}
+
+	templates := []*chart.File{
+		{Name: "templates/hello", Data: []byte("hello: world")},
+		{Name: "templates/hooks", Data: []byte(manifest)},
+	}
+	vals := map[string]interface{}{}
+
+	res, err := instAction.Run(buildChartWithTemplates(templates), vals)
+	is.Error(err)
+	is.Contains(res.Info.Description, "failed pre-install")
+	is.Equal(expectedOutput, outBuffer.String())
+	is.Equal(release.StatusFailed, res.Info.Status)
+}
+
 type HookFailedError struct{}
 
 func (e *HookFailedError) Error() string {
@@ -181,7 +362,7 @@ data:
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
 			kubeClient := &HookFailingKubeClient{
-				kubefake.PrintingKubeClient{Out: ioutil.Discard}, tc.failOn, []resource.Info{},
+				kubefake.PrintingKubeClient{Out: io.Discard}, tc.failOn, []resource.Info{},
 			}
 
 			configuration := &Configuration{