diff --git a/cmd/helm/install.go b/cmd/helm/install.go
index 281679e5c..6c75b5b9f 100644
--- a/cmd/helm/install.go
+++ b/cmd/helm/install.go
@@ -166,6 +166,7 @@ func addInstallFlags(cmd *cobra.Command, f *pflag.FlagSet, client *action.Instal
 	f.StringVar(&client.Description, "description", "", "add a custom description")
 	f.BoolVar(&client.Devel, "devel", false, "use development versions, too. Equivalent to version '>0.0.0-0'. If --version is set, this is ignored")
 	f.BoolVar(&client.DependencyUpdate, "dependency-update", false, "update dependencies if they are missing before installing the chart")
+	f.BoolVar(&client.LogOnFail, "log-on-fail", false, "show failed job log when installing process fails")
 	f.BoolVar(&client.DisableOpenAPIValidation, "disable-openapi-validation", false, "if set, the installation process will not validate rendered templates against the Kubernetes OpenAPI Schema")
 	f.BoolVar(&client.Atomic, "atomic", false, "if set, the installation process deletes the installation on failure. The --wait flag will be set automatically if --atomic is used")
 	f.BoolVar(&client.SkipCRDs, "skip-crds", false, "if set, no CRDs will be installed. By default, CRDs are installed if not already present")
diff --git a/cmd/helm/rollback.go b/cmd/helm/rollback.go
index ea4b75cb1..f2d101966 100644
--- a/cmd/helm/rollback.go
+++ b/cmd/helm/rollback.go
@@ -84,6 +84,7 @@ func newRollbackCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
 	f.BoolVar(&client.Wait, "wait", false, "if set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment, StatefulSet, or ReplicaSet are in a ready state before marking the release as successful. It will wait for as long as --timeout")
 	f.BoolVar(&client.WaitForJobs, "wait-for-jobs", false, "if set and --wait enabled, will wait until all Jobs have been completed before marking the release as successful. It will wait for as long as --timeout")
 	f.BoolVar(&client.CleanupOnFail, "cleanup-on-fail", false, "allow deletion of new resources created in this rollback when rollback fails")
+	f.BoolVar(&client.LogOnFail, "log-on-fail", false, "show failed job log when rollback fails")
 	f.IntVar(&client.MaxHistory, "history-max", settings.MaxHistory, "limit the maximum number of revisions saved per release. Use 0 for no limit")
 
 	return cmd
diff --git a/cmd/helm/upgrade.go b/cmd/helm/upgrade.go
index 02f4cf2a9..48b672a72 100644
--- a/cmd/helm/upgrade.go
+++ b/cmd/helm/upgrade.go
@@ -119,6 +119,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
 		instClient.SubNotes = client.SubNotes
 		instClient.Description = client.Description
 		instClient.DependencyUpdate = client.DependencyUpdate
+		instClient.LogOnFail = client.LogOnFail
 
 		rel, err := runInstall(args, instClient, valueOpts, out)
 		if err != nil {
@@ -199,6 +200,12 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
 
 		rel, err := client.RunWithContext(ctx, args[0], ch, vals)
 		if err != nil {
+			// if client.LogOnFail && outfmt == output.Table {
+			// 	for _, hook := range rel.Hooks {
+			// 		fmt.Fprintf(out, hook.LastRun.Logs)
+			// 	}
+			// }
+
 			return errors.Wrap(err, "UPGRADE FAILED")
 		}
 
@@ -229,6 +236,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
 	f.BoolVar(&client.Atomic, "atomic", false, "if set, upgrade process rolls back changes made in case of failed upgrade. The --wait flag will be set automatically if --atomic is used")
 	f.IntVar(&client.MaxHistory, "history-max", settings.MaxHistory, "limit the maximum number of revisions saved per release. Use 0 for no limit")
 	f.BoolVar(&client.CleanupOnFail, "cleanup-on-fail", false, "allow deletion of new resources created in this upgrade when upgrade fails")
+	f.BoolVar(&client.LogOnFail, "log-on-fail", false, "show failed job log when upgrade fails")
 	f.BoolVar(&client.SubNotes, "render-subchart-notes", false, "if set, render subchart notes along with the parent")
 	f.StringVar(&client.Description, "description", "", "add a custom description")
 	f.BoolVar(&client.DependencyUpdate, "dependency-update", false, "update dependencies if they are missing before installing the chart")
diff --git a/pkg/action/hooks.go b/pkg/action/hooks.go
index 40c1ffdb6..0e31f2714 100644
--- a/pkg/action/hooks.go
+++ b/pkg/action/hooks.go
@@ -17,17 +17,24 @@ package action
 
 import (
 	"bytes"
+	"context"
+	"fmt"
+	"io"
 	"sort"
+	"strings"
 	"time"
 
 	"github.com/pkg/errors"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
+	"helm.sh/helm/v3/pkg/kube"
 	"helm.sh/helm/v3/pkg/release"
 	helmtime "helm.sh/helm/v3/pkg/time"
 )
 
 // execHook executes all of the hooks for the given hook event.
-func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, timeout time.Duration) error {
+func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, timeout time.Duration) (string, error) {
 	executingHooks := []*release.Hook{}
 
 	for _, h := range rl.Hooks {
@@ -52,12 +59,12 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent,
 		}
 
 		if err := cfg.deleteHookByPolicy(h, release.HookBeforeHookCreation); err != nil {
-			return err
+			return "", err
 		}
 
 		resources, err := cfg.KubeClient.Build(bytes.NewBufferString(h.Manifest), true)
 		if err != nil {
-			return errors.Wrapf(err, "unable to build kubernetes object for %s hook %s", hook, h.Path)
+			return "", errors.Wrapf(err, "unable to build kubernetes object for %s hook %s", hook, h.Path)
 		}
 
 		// Record the time at which the hook was applied to the cluster
@@ -76,7 +83,7 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent,
 		if _, err := cfg.KubeClient.Create(resources); err != nil {
 			h.LastRun.CompletedAt = helmtime.Now()
 			h.LastRun.Phase = release.HookPhaseFailed
-			return errors.Wrapf(err, "warning: Hook %s %s failed", hook, h.Path)
+			return "", errors.Wrapf(err, "warning: Hook %s %s failed", hook, h.Path)
 		}
 
 		// Watch hook resources until they have completed
@@ -86,12 +93,18 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent,
 		// Mark hook as succeeded or failed
 		if err != nil {
 			h.LastRun.Phase = release.HookPhaseFailed
+
+			logs, lerr := cfg.hookGetLogs(rl, h, resources)
+			if lerr != nil {
+				return logs, lerr
+			}
+
 			// If a hook is failed, check the annotation of the hook to determine whether the hook should be deleted
 			// under failed condition. If so, then clear the corresponding resource object in the hook
 			if err := cfg.deleteHookByPolicy(h, release.HookFailed); err != nil {
-				return err
+				return logs, err
 			}
-			return err
+			return logs, err
 		}
 		h.LastRun.Phase = release.HookPhaseSucceeded
 	}
@@ -100,11 +113,11 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent,
 	// under succeeded condition. If so, then clear the corresponding resource object in each hook
 	for _, h := range executingHooks {
 		if err := cfg.deleteHookByPolicy(h, release.HookSucceeded); err != nil {
-			return err
+			return "", err
 		}
 	}
 
-	return nil
+	return "", nil
 }
 
 // hookByWeight is a sorter for hooks
@@ -149,3 +162,83 @@ func hookHasDeletePolicy(h *release.Hook, policy release.HookDeletePolicy) bool
 	}
 	return false
 }
+
+func (cfg *Configuration) hookGetLogs(rl *release.Release, h *release.Hook, resources kube.ResourceList) (string, error) {
+	client, err := cfg.KubernetesClientSet()
+	if err != nil {
+		return "", errors.Wrap(err, "unable to get kubernetes client to fetch pod logs")
+	}
+
+	switch h.Kind {
+	case "Job":
+		for _, res := range resources {
+			versioned := kube.AsVersioned(res)
+			selector, err := kube.SelectorsForObject(versioned)
+			if err != nil {
+				// If no selector is returned, it means this object is
+				// definitely not a pod, so continue onward
+				continue
+			}
+
+			pods, err := client.CoreV1().Pods(res.Namespace).List(context.Background(), metav1.ListOptions{
+				LabelSelector: selector.String(),
+			})
+			if err != nil {
+				return "", errors.Wrapf(err, "unable to get pods for object %s because an error occurred", res.Name)
+			}
+
+			var logs []string
+
+			for _, pod := range pods.Items {
+				log, err := cfg.hookGetPodLogs(rl, &pod)
+				if err != nil {
+					return "", err
+				}
+
+				logs = append(logs, log)
+			}
+
+			return strings.Join(logs, "\n"), nil
+		}
+	case "Pod":
+		pod, err := client.CoreV1().Pods(rl.Namespace).Get(context.Background(), h.Name, metav1.GetOptions{})
+		if err != nil {
+			return "", errors.Wrapf(err, "unable to get pods for object %s because an error occurred", h.Name)
+		}
+
+		return cfg.hookGetPodLogs(rl, pod)
+	default:
+		return "", nil
+	}
+
+	return "", nil
+}
+
+func (cfg *Configuration) hookGetPodLogs(rl *release.Release, pod *v1.Pod) (string, error) {
+	client, err := cfg.KubernetesClientSet()
+	if err != nil {
+		return "", errors.Wrap(err, "unable to get kubernetes client to fetch pod logs")
+	}
+
+	var logs []string
+
+	for _, container := range pod.Spec.Containers {
+		req := client.CoreV1().Pods(rl.Namespace).GetLogs(pod.Name, &v1.PodLogOptions{
+			Container: container.Name,
+			Follow:    false,
+		})
+		logReader, err := req.Stream(context.Background())
+		if err != nil {
+			return "", errors.Wrapf(err, "unable to get pod logs for object %s/%s", pod.Name, container.Name)
+		}
+		defer logReader.Close()
+
+		out, _ := io.ReadAll(logReader)
+
+		logs = append(logs, fmt.Sprintf("HOOK LOGS: pod %s, container %s:\n%s", pod.Name, container.Name, string(out)))
+
+		cfg.Log("HOOK LOGS: pod %s, container %s:\n%s", pod.Name, container.Name, string(out))
+	}
+
+	return strings.Join(logs, "\n"), nil
+}
diff --git a/pkg/action/install.go b/pkg/action/install.go
index 425b66f69..3792d2880 100644
--- a/pkg/action/install.go
+++ b/pkg/action/install.go
@@ -78,6 +78,7 @@ type Install struct {
 	WaitForJobs bool
 	Devel bool
 	DependencyUpdate bool
+	LogOnFail bool
 	Timeout time.Duration
 	Namespace string
 	ReleaseName string
@@ -358,7 +359,11 @@ func (i *Install) performInstall(c chan<- resultMessage, rel *release.Release, t
 
 	// pre-install hooks
 	if !i.DisableHooks {
-		if err := i.cfg.execHook(rel, release.HookPreInstall, i.Timeout); err != nil {
+		if logs, err := i.cfg.execHook(rel, release.HookPreInstall, i.Timeout); err != nil {
+			if i.LogOnFail {
+				fmt.Println(logs)
+			}
+
 			i.reportToRun(c, rel, fmt.Errorf("failed pre-install: %s", err))
 			return
 		}
@@ -393,8 +398,13 @@ func (i *Install) performInstall(c chan<- resultMessage, rel *release.Release, t
 		}
 	}
 
+	// post-install hooks
 	if !i.DisableHooks {
-		if err := i.cfg.execHook(rel, release.HookPostInstall, i.Timeout); err != nil {
+		if logs, err := i.cfg.execHook(rel, release.HookPostInstall, i.Timeout); err != nil {
+			if i.LogOnFail {
+				fmt.Println(logs)
+			}
+
 			i.reportToRun(c, rel, fmt.Errorf("failed post-install: %s", err))
 			return
 		}
diff --git a/pkg/action/release_testing.go b/pkg/action/release_testing.go
index ecaeaf59f..196b0f6ba 100644
--- a/pkg/action/release_testing.go
+++ b/pkg/action/release_testing.go
@@ -88,7 +88,7 @@ func (r *ReleaseTesting) Run(name string) (*release.Release, error) {
 		rel.Hooks = executingHooks
 	}
 
-	if err := r.cfg.execHook(rel, release.HookTest, r.Timeout); err != nil {
+	if _, err := r.cfg.execHook(rel, release.HookTest, r.Timeout); err != nil {
 		rel.Hooks = append(skippedHooks, rel.Hooks...)
 		r.cfg.Releases.Update(rel)
 		return rel, err
diff --git a/pkg/action/rollback.go b/pkg/action/rollback.go
index dda8c700b..b53816cc0 100644
--- a/pkg/action/rollback.go
+++ b/pkg/action/rollback.go
@@ -44,6 +44,7 @@ type Rollback struct {
 	Recreate bool // will (if true) recreate pods after a rollback.
 	Force bool // will (if true) force resource upgrade through uninstall/recreate if needed
 	CleanupOnFail bool
+	LogOnFail bool
 	MaxHistory int // MaxHistory limits the maximum number of revisions saved per release
 }
 
@@ -154,10 +155,12 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas
 	if err != nil {
 		return targetRelease, errors.Wrap(err, "unable to build kubernetes objects from new release manifest")
 	}
-
 	// pre-rollback hooks
 	if !r.DisableHooks {
-		if err := r.cfg.execHook(targetRelease, release.HookPreRollback, r.Timeout); err != nil {
+		if logs, err := r.cfg.execHook(targetRelease, release.HookPreRollback, r.Timeout); err != nil {
+			if r.LogOnFail {
+				fmt.Println(logs)
+			}
 			return targetRelease, err
 		}
 	} else {
@@ -224,7 +227,10 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas
 
 	// post-rollback hooks
 	if !r.DisableHooks {
-		if err := r.cfg.execHook(targetRelease, release.HookPostRollback, r.Timeout); err != nil {
+		if logs, err := r.cfg.execHook(targetRelease, release.HookPostRollback, r.Timeout); err != nil {
+			if r.LogOnFail {
+				fmt.Println(logs)
+			}
 			return targetRelease, err
 		}
 	}
diff --git a/pkg/action/uninstall.go b/pkg/action/uninstall.go
index 9dcbf19b0..6c28d8ebb 100644
--- a/pkg/action/uninstall.go
+++ b/pkg/action/uninstall.go
@@ -99,7 +99,7 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error)
 	res := &release.UninstallReleaseResponse{Release: rel}
 
 	if !u.DisableHooks {
-		if err := u.cfg.execHook(rel, release.HookPreDelete, u.Timeout); err != nil {
+		if _, err := u.cfg.execHook(rel, release.HookPreDelete, u.Timeout); err != nil {
 			return res, err
 		}
 	} else {
@@ -132,7 +132,7 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error)
 	}
 
 	if !u.DisableHooks {
-		if err := u.cfg.execHook(rel, release.HookPostDelete, u.Timeout); err != nil {
+		if _, err := u.cfg.execHook(rel, release.HookPostDelete, u.Timeout); err != nil {
 			errs = append(errs, err)
 		}
 	}
diff --git a/pkg/action/upgrade.go b/pkg/action/upgrade.go
index 690397d4a..cc25e3117 100644
--- a/pkg/action/upgrade.go
+++ b/pkg/action/upgrade.go
@@ -88,6 +88,8 @@ type Upgrade struct {
 	Atomic bool
 	// CleanupOnFail will, if true, cause the upgrade to delete newly-created resources on a failed update.
 	CleanupOnFail bool
+	// LogOnFail will, if true, cause the upgrade to show jobs log on a failed update.
+	LogOnFail bool
 	// SubNotes determines whether sub-notes are rendered in the chart.
 	SubNotes bool
 	// Description is the description of this operation
@@ -365,7 +367,10 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele
 
 	// pre-upgrade hooks
 	if !u.DisableHooks {
-		if err := u.cfg.execHook(upgradedRelease, release.HookPreUpgrade, u.Timeout); err != nil {
+		if logs, err := u.cfg.execHook(upgradedRelease, release.HookPreUpgrade, u.Timeout); err != nil {
+			if u.LogOnFail {
+				fmt.Println(logs)
+			}
 			u.reportToPerformUpgrade(c, upgradedRelease, kube.ResourceList{}, fmt.Errorf("pre-upgrade hooks failed: %s", err))
 			return
 		}
@@ -411,7 +416,11 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele
 
 	// post-upgrade hooks
 	if !u.DisableHooks {
-		if err := u.cfg.execHook(upgradedRelease, release.HookPostUpgrade, u.Timeout); err != nil {
+		if logs, err := u.cfg.execHook(upgradedRelease, release.HookPostUpgrade, u.Timeout); err != nil {
+			if u.LogOnFail {
+				fmt.Println(logs)
+			}
+
 			u.reportToPerformUpgrade(c, upgradedRelease, results.Created, fmt.Errorf("post-upgrade hooks failed: %s", err))
 			return
 		}
diff --git a/pkg/kube/client.go b/pkg/kube/client.go
index d30e5c535..6063403e5 100644
--- a/pkg/kube/client.go
+++ b/pkg/kube/client.go
@@ -763,7 +763,7 @@ func (c *Client) waitForJob(obj runtime.Object, name string) (bool, error) {
 		if c.Type == batch.JobComplete && c.Status == "True" {
 			return true, nil
 		} else if c.Type == batch.JobFailed && c.Status == "True" {
-			return true, errors.Errorf("job failed: %s", c.Reason)
+			return true, errors.Errorf("job %s failed: %s", name, c.Reason)
 		}
 	}
 
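
For context, here is a minimal, hypothetical sketch of how an SDK caller might opt into the new behavior once this change lands. The release name, namespace, and chart path are placeholder values; only the `LogOnFail` field comes from this diff, the rest is existing Helm SDK usage.

```go
package main

import (
	"log"

	"helm.sh/helm/v3/pkg/action"
	"helm.sh/helm/v3/pkg/chart/loader"
	"helm.sh/helm/v3/pkg/cli"
)

func main() {
	settings := cli.New()

	// Initialize the action configuration against the current kubeconfig context.
	cfg := new(action.Configuration)
	if err := cfg.Init(settings.RESTClientGetter(), "default", "secret", log.Printf); err != nil {
		log.Fatal(err)
	}

	install := action.NewInstall(cfg)
	install.ReleaseName = "demo"  // placeholder release name
	install.Namespace = "default" // placeholder namespace
	install.LogOnFail = true      // new field added by this change

	chrt, err := loader.Load("./mychart") // placeholder chart path
	if err != nil {
		log.Fatal(err)
	}

	// With LogOnFail set, a failed hook job's pod logs (collected by the new
	// hookGetLogs helper) are printed before the install error is reported.
	if _, err := install.Run(chrt, map[string]interface{}{}); err != nil {
		log.Fatalf("install failed: %v", err)
	}
}
```

The Upgrade and Rollback actions expose the same field, and on the CLI the equivalent is passing --log-on-fail to helm install, helm upgrade, or helm rollback.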