feat: hooks error logs

This feature adds a --log-on-fail flag to helm install, helm upgrade, and helm rollback. When a hook fails, the logs of the hook's Job pods (or of a hook Pod) are collected and printed, so the failure can be diagnosed without querying the cluster by hand.
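
A minimal usage sketch (release and chart names below are placeholders):

helm install my-release ./mychart --log-on-fail
helm upgrade my-release ./mychart --log-on-fail

If a pre- or post-install/upgrade hook Job fails, its pod logs are printed to stdout before the command returns the usual error.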

Signed-off-by: Serge Logvinov <serge.logvinov@sinextra.dev>
pull/11228/head
Serge Logvinov
parent 5bf273d81b
commit bc131928a9

@@ -166,6 +166,7 @@ func addInstallFlags(cmd *cobra.Command, f *pflag.FlagSet, client *action.Instal
f.StringVar(&client.Description, "description", "", "add a custom description")
f.BoolVar(&client.Devel, "devel", false, "use development versions, too. Equivalent to version '>0.0.0-0'. If --version is set, this is ignored")
f.BoolVar(&client.DependencyUpdate, "dependency-update", false, "update dependencies if they are missing before installing the chart")
+ f.BoolVar(&client.LogOnFail, "log-on-fail", false, "show failed job log when installing process fails")
f.BoolVar(&client.DisableOpenAPIValidation, "disable-openapi-validation", false, "if set, the installation process will not validate rendered templates against the Kubernetes OpenAPI Schema")
f.BoolVar(&client.Atomic, "atomic", false, "if set, the installation process deletes the installation on failure. The --wait flag will be set automatically if --atomic is used")
f.BoolVar(&client.SkipCRDs, "skip-crds", false, "if set, no CRDs will be installed. By default, CRDs are installed if not already present")

@@ -84,6 +84,7 @@ func newRollbackCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
f.BoolVar(&client.Wait, "wait", false, "if set, will wait until all Pods, PVCs, Services, and minimum number of Pods of a Deployment, StatefulSet, or ReplicaSet are in a ready state before marking the release as successful. It will wait for as long as --timeout")
f.BoolVar(&client.WaitForJobs, "wait-for-jobs", false, "if set and --wait enabled, will wait until all Jobs have been completed before marking the release as successful. It will wait for as long as --timeout")
f.BoolVar(&client.CleanupOnFail, "cleanup-on-fail", false, "allow deletion of new resources created in this rollback when rollback fails")
+ f.BoolVar(&client.LogOnFail, "log-on-fail", false, "show failed job log when rollback fails")
f.IntVar(&client.MaxHistory, "history-max", settings.MaxHistory, "limit the maximum number of revisions saved per release. Use 0 for no limit")
return cmd

@@ -119,6 +119,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
instClient.SubNotes = client.SubNotes
instClient.Description = client.Description
instClient.DependencyUpdate = client.DependencyUpdate
+ instClient.LogOnFail = client.LogOnFail
rel, err := runInstall(args, instClient, valueOpts, out)
if err != nil {
@@ -199,6 +200,12 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
rel, err := client.RunWithContext(ctx, args[0], ch, vals)
if err != nil {
+ // if client.LogOnFail && outfmt == output.Table {
+ // for _, hook := range rel.Hooks {
+ // fmt.Fprintf(out, hook.LastRun.Logs)
+ // }
+ // }
return errors.Wrap(err, "UPGRADE FAILED")
}
@@ -229,6 +236,7 @@ func newUpgradeCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
f.BoolVar(&client.Atomic, "atomic", false, "if set, upgrade process rolls back changes made in case of failed upgrade. The --wait flag will be set automatically if --atomic is used")
f.IntVar(&client.MaxHistory, "history-max", settings.MaxHistory, "limit the maximum number of revisions saved per release. Use 0 for no limit")
f.BoolVar(&client.CleanupOnFail, "cleanup-on-fail", false, "allow deletion of new resources created in this upgrade when upgrade fails")
+ f.BoolVar(&client.LogOnFail, "log-on-fail", false, "show failed job log when upgrade fails")
f.BoolVar(&client.SubNotes, "render-subchart-notes", false, "if set, render subchart notes along with the parent")
f.StringVar(&client.Description, "description", "", "add a custom description")
f.BoolVar(&client.DependencyUpdate, "dependency-update", false, "update dependencies if they are missing before installing the chart")

@@ -17,17 +17,24 @@ package action
import (
"bytes"
+ "context"
+ "fmt"
+ "io"
"sort"
+ "strings"
"time"
"github.com/pkg/errors"
+ v1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "helm.sh/helm/v3/pkg/kube"
"helm.sh/helm/v3/pkg/release"
helmtime "helm.sh/helm/v3/pkg/time"
)
// execHook executes all of the hooks for the given hook event.
- func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, timeout time.Duration) error {
+ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, timeout time.Duration) (string, error) {
executingHooks := []*release.Hook{}
for _, h := range rl.Hooks {
@@ -52,12 +59,12 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent,
}
if err := cfg.deleteHookByPolicy(h, release.HookBeforeHookCreation); err != nil {
- return err
+ return "", err
}
resources, err := cfg.KubeClient.Build(bytes.NewBufferString(h.Manifest), true)
if err != nil {
- return errors.Wrapf(err, "unable to build kubernetes object for %s hook %s", hook, h.Path)
+ return "", errors.Wrapf(err, "unable to build kubernetes object for %s hook %s", hook, h.Path)
}
// Record the time at which the hook was applied to the cluster
@@ -76,7 +83,7 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent,
if _, err := cfg.KubeClient.Create(resources); err != nil {
h.LastRun.CompletedAt = helmtime.Now()
h.LastRun.Phase = release.HookPhaseFailed
- return errors.Wrapf(err, "warning: Hook %s %s failed", hook, h.Path)
+ return "", errors.Wrapf(err, "warning: Hook %s %s failed", hook, h.Path)
}
// Watch hook resources until they have completed
@@ -86,12 +93,18 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent,
// Mark hook as succeeded or failed
if err != nil {
h.LastRun.Phase = release.HookPhaseFailed
+ logs, lerr := cfg.hookGetLogs(rl, h, resources)
+ if lerr != nil {
+ return logs, lerr
+ }
// If a hook is failed, check the annotation of the hook to determine whether the hook should be deleted
// under failed condition. If so, then clear the corresponding resource object in the hook
if err := cfg.deleteHookByPolicy(h, release.HookFailed); err != nil {
- return err
+ return logs, err
}
- return err
+ return logs, err
}
h.LastRun.Phase = release.HookPhaseSucceeded
}
@@ -100,11 +113,11 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent,
// under succeeded condition. If so, then clear the corresponding resource object in each hook
for _, h := range executingHooks {
if err := cfg.deleteHookByPolicy(h, release.HookSucceeded); err != nil {
- return "", err
+ return "", err
}
}
- return nil
+ return "", nil
}
// hookByWeight is a sorter for hooks
@@ -149,3 +162,83 @@ func hookHasDeletePolicy(h *release.Hook, policy release.HookDeletePolicy) bool
}
return false
}
+ func (cfg *Configuration) hookGetLogs(rl *release.Release, h *release.Hook, resources kube.ResourceList) (string, error) {
+     client, err := cfg.KubernetesClientSet()
+     if err != nil {
+         return "", errors.Wrap(err, "unable to get kubernetes client to fetch pod logs")
+     }
+     switch h.Kind {
+     case "Job":
+         for _, res := range resources {
+             versioned := kube.AsVersioned(res)
+             selector, err := kube.SelectorsForObject(versioned)
+             if err != nil {
+                 // If no selector is returned, it means this object is
+                 // definitely not a pod, so continue onward
+                 continue
+             }
+             pods, err := client.CoreV1().Pods(res.Namespace).List(context.Background(), metav1.ListOptions{
+                 LabelSelector: selector.String(),
+             })
+             if err != nil {
+                 return "", errors.Wrapf(err, "unable to get pods for object %s because an error occurred", res.Name)
+             }
+             var logs []string
+             for _, pod := range pods.Items {
+                 log, err := cfg.hookGetPodLogs(rl, &pod)
+                 if err != nil {
+                     return "", err
+                 }
+                 logs = append(logs, log)
+             }
+             return strings.Join(logs, "\n"), nil
+         }
+     case "Pod":
+         pod, err := client.CoreV1().Pods(rl.Namespace).Get(context.Background(), h.Name, metav1.GetOptions{})
+         if err != nil {
+             return "", errors.Wrapf(err, "unable to get pods for object %s because an error occurred", h.Name)
+         }
+         return cfg.hookGetPodLogs(rl, pod)
+     default:
+         return "", nil
+     }
+     return "", nil
+ }
+ func (cfg *Configuration) hookGetPodLogs(rl *release.Release, pod *v1.Pod) (string, error) {
+     client, err := cfg.KubernetesClientSet()
+     if err != nil {
+         return "", errors.Wrap(err, "unable to get kubernetes client to fetch pod logs")
+     }
+     var logs []string
+     for _, container := range pod.Spec.Containers {
+         req := client.CoreV1().Pods(rl.Namespace).GetLogs(pod.Name, &v1.PodLogOptions{
+             Container: container.Name,
+             Follow: false,
+         })
+         logReader, err := req.Stream(context.Background())
+         if err != nil {
+             return "", errors.Wrapf(err, "unable to get pod logs for object %s/%s", pod.Name, container.Name)
+         }
+         defer logReader.Close()
+         out, _ := io.ReadAll(logReader)
+         logs = append(logs, fmt.Sprintf("HOOK LOGS: pod %s, container %s:\n%s", pod.Name, container.Name, string(out)))
+         cfg.Log("HOOK LOGS: pod %s, container %s:\n%s", pod.Name, container.Name, string(out))
+     }
+     return strings.Join(logs, "\n"), nil
+ }

@@ -78,6 +78,7 @@ type Install struct {
WaitForJobs bool
Devel bool
DependencyUpdate bool
+ LogOnFail bool
Timeout time.Duration
Namespace string
ReleaseName string
@@ -358,7 +359,11 @@ func (i *Install) performInstall(c chan<- resultMessage, rel *release.Release, t
// pre-install hooks
if !i.DisableHooks {
- if err := i.cfg.execHook(rel, release.HookPreInstall, i.Timeout); err != nil {
+ if logs, err := i.cfg.execHook(rel, release.HookPreInstall, i.Timeout); err != nil {
+ if i.LogOnFail {
+ fmt.Println(logs)
+ }
i.reportToRun(c, rel, fmt.Errorf("failed pre-install: %s", err))
return
}
@@ -393,8 +398,13 @@ func (i *Install) performInstall(c chan<- resultMessage, rel *release.Release, t
}
}
+ // post-install hooks
if !i.DisableHooks {
- if err := i.cfg.execHook(rel, release.HookPostInstall, i.Timeout); err != nil {
+ if logs, err := i.cfg.execHook(rel, release.HookPostInstall, i.Timeout); err != nil {
+ if i.LogOnFail {
+ fmt.Println(logs)
+ }
i.reportToRun(c, rel, fmt.Errorf("failed post-install: %s", err))
return
}

@@ -88,7 +88,7 @@ func (r *ReleaseTesting) Run(name string) (*release.Release, error) {
rel.Hooks = executingHooks
}
- if err := r.cfg.execHook(rel, release.HookTest, r.Timeout); err != nil {
+ if _, err := r.cfg.execHook(rel, release.HookTest, r.Timeout); err != nil {
rel.Hooks = append(skippedHooks, rel.Hooks...)
r.cfg.Releases.Update(rel)
return rel, err

@@ -44,6 +44,7 @@ type Rollback struct {
Recreate bool // will (if true) recreate pods after a rollback.
Force bool // will (if true) force resource upgrade through uninstall/recreate if needed
CleanupOnFail bool
+ LogOnFail bool
MaxHistory int // MaxHistory limits the maximum number of revisions saved per release
}
@@ -154,10 +155,12 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas
if err != nil {
return targetRelease, errors.Wrap(err, "unable to build kubernetes objects from new release manifest")
}
// pre-rollback hooks
if !r.DisableHooks {
- if err := r.cfg.execHook(targetRelease, release.HookPreRollback, r.Timeout); err != nil {
+ if logs, err := r.cfg.execHook(targetRelease, release.HookPreRollback, r.Timeout); err != nil {
+ if r.LogOnFail {
+ fmt.Println(logs)
+ }
return targetRelease, err
}
} else {
@@ -224,7 +227,10 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas
// post-rollback hooks
if !r.DisableHooks {
- if err := r.cfg.execHook(targetRelease, release.HookPostRollback, r.Timeout); err != nil {
+ if logs, err := r.cfg.execHook(targetRelease, release.HookPostRollback, r.Timeout); err != nil {
+ if r.LogOnFail {
+ fmt.Println(logs)
+ }
return targetRelease, err
}
}

@@ -99,7 +99,7 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error)
res := &release.UninstallReleaseResponse{Release: rel}
if !u.DisableHooks {
- if err := u.cfg.execHook(rel, release.HookPreDelete, u.Timeout); err != nil {
+ if _, err := u.cfg.execHook(rel, release.HookPreDelete, u.Timeout); err != nil {
return res, err
}
} else {
@@ -132,7 +132,7 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error)
}
if !u.DisableHooks {
- if err := u.cfg.execHook(rel, release.HookPostDelete, u.Timeout); err != nil {
+ if _, err := u.cfg.execHook(rel, release.HookPostDelete, u.Timeout); err != nil {
errs = append(errs, err)
}
}

@@ -88,6 +88,8 @@ type Upgrade struct {
Atomic bool
// CleanupOnFail will, if true, cause the upgrade to delete newly-created resources on a failed update.
CleanupOnFail bool
+ // LogOnFail will, if true, cause the upgrade to show jobs log on a failed update.
+ LogOnFail bool
// SubNotes determines whether sub-notes are rendered in the chart.
SubNotes bool
// Description is the description of this operation
@@ -365,7 +367,10 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele
// pre-upgrade hooks
if !u.DisableHooks {
- if err := u.cfg.execHook(upgradedRelease, release.HookPreUpgrade, u.Timeout); err != nil {
+ if logs, err := u.cfg.execHook(upgradedRelease, release.HookPreUpgrade, u.Timeout); err != nil {
+ if u.LogOnFail {
+ fmt.Println(logs)
+ }
u.reportToPerformUpgrade(c, upgradedRelease, kube.ResourceList{}, fmt.Errorf("pre-upgrade hooks failed: %s", err))
return
}
@@ -411,7 +416,11 @@ func (u *Upgrade) releasingUpgrade(c chan<- resultMessage, upgradedRelease *rele
// post-upgrade hooks
if !u.DisableHooks {
- if err := u.cfg.execHook(upgradedRelease, release.HookPostUpgrade, u.Timeout); err != nil {
+ if logs, err := u.cfg.execHook(upgradedRelease, release.HookPostUpgrade, u.Timeout); err != nil {
+ if u.LogOnFail {
+ fmt.Println(logs)
+ }
u.reportToPerformUpgrade(c, upgradedRelease, results.Created, fmt.Errorf("post-upgrade hooks failed: %s", err))
return
}

@@ -763,7 +763,7 @@ func (c *Client) waitForJob(obj runtime.Object, name string) (bool, error) {
if c.Type == batch.JobComplete && c.Status == "True" {
return true, nil
} else if c.Type == batch.JobFailed && c.Status == "True" {
- return true, errors.Errorf("job failed: %s", c.Reason)
+ return true, errors.Errorf("job %s failed: %s", name, c.Reason)
}
}
