From 6a64ed73c22f54b309bb311cdfde51ae825f2430 Mon Sep 17 00:00:00 2001
From: Soren Mathiasen
Date: Wed, 8 Dec 2021 09:14:47 +0100
Subject: [PATCH] Print pod status when the --debug flag is enabled

Instead of only printing which pods are not ready, include more detailed
information on why they are not. This makes it much easier to debug errors
when running helm upgrade or install with --debug enabled.

Signed-off-by: Soren Mathiasen
---
 pkg/kube/ready.go      | 51 +++++++++++++++++++++++++++++++++++++++---
 pkg/kube/ready_test.go |  2 +-
 2 files changed, 49 insertions(+), 4 deletions(-)

diff --git a/pkg/kube/ready.go b/pkg/kube/ready.go
index 5d080d9bf..2f0194c45 100644
--- a/pkg/kube/ready.go
+++ b/pkg/kube/ready.go
@@ -18,6 +18,7 @@ package kube // import "helm.sh/helm/v3/pkg/kube"
 
 import (
 	"context"
+	"fmt"
 
 	appsv1 "k8s.io/api/apps/v1"
 	appsv1beta1 "k8s.io/api/apps/v1beta1"
@@ -123,7 +124,7 @@ func (c *ReadyChecker) IsReady(ctx context.Context, v *resource.Info) (bool, err
 		if err != nil || newReplicaSet == nil {
 			return false, err
 		}
-		if !c.deploymentReady(newReplicaSet, currentDeployment) {
+		if !c.deploymentReady(ctx, newReplicaSet, currentDeployment) {
 			return false, nil
 		}
 	case *corev1.PersistentVolumeClaim:
@@ -218,7 +219,11 @@ func (c *ReadyChecker) isPodReady(pod *corev1.Pod) bool {
 			return true
 		}
 	}
-	c.log("Pod is not ready: %s/%s", pod.GetNamespace(), pod.GetName())
+	msg := podDetails(pod)
+	if msg == "" {
+		msg = string(pod.Status.Phase)
+	}
+	c.log("Pod is not ready: %s/%s %s", pod.GetNamespace(), pod.GetName(), msg)
 	return false
 }
 
@@ -271,10 +276,19 @@ func (c *ReadyChecker) volumeReady(v *corev1.PersistentVolumeClaim) bool {
 	return true
 }
 
-func (c *ReadyChecker) deploymentReady(rs *appsv1.ReplicaSet, dep *appsv1.Deployment) bool {
+func (c *ReadyChecker) deploymentReady(ctx context.Context, rs *appsv1.ReplicaSet, dep *appsv1.Deployment) bool {
 	expectedReady := *dep.Spec.Replicas - deploymentutil.MaxUnavailable(*dep)
 	if !(rs.Status.ReadyReplicas >= expectedReady) {
 		c.log("Deployment is not ready: %s/%s. %d out of %d expected pods are ready", dep.Namespace, dep.Name, rs.Status.ReadyReplicas, expectedReady)
+		pods, err := c.podsforObject(ctx, rs.Namespace, rs)
+		if err != nil {
+			c.log("Error getting pods for ReplicaSet %s/%s: %v", rs.Namespace, rs.Name, err)
+			return false
+		}
+		for _, pod := range pods {
+			// Log each pod's status so it is clear why the Deployment is not ready
+			c.isPodReady(&pod)
+		}
 		return false
 	}
 	return true
@@ -395,3 +409,34 @@ func getPods(ctx context.Context, client kubernetes.Interface, namespace, select
 	})
 	return list.Items, err
 }
+
+func podDetails(pod *corev1.Pod) string {
+	for _, c := range pod.Status.Conditions {
+		if c.Type == corev1.PodScheduled {
+			if c.Reason == corev1.PodReasonUnschedulable {
+				msg := c.Reason
+				if len(c.Message) > 0 {
+					msg += fmt.Sprintf(" (unscheduled: %s)", c.Message)
+				}
+				return msg
+			}
+		}
+	}
+
+	statuses := []corev1.ContainerStatus{}
+	statuses = append(statuses, pod.Status.InitContainerStatuses...)
+	statuses = append(statuses, pod.Status.EphemeralContainerStatuses...)
+	statuses = append(statuses, pod.Status.ContainerStatuses...)
+
+	for _, s := range statuses {
+		if s.State.Waiting != nil {
+			msg := s.State.Waiting.Reason
+			if len(s.State.Waiting.Message) > 0 {
+				msg += fmt.Sprintf(" (waiting: %s)", s.State.Waiting.Message)
+			}
+			return msg
+		}
+	}
+
+	return ""
+}
diff --git a/pkg/kube/ready_test.go b/pkg/kube/ready_test.go
index 931b8fa19..1a2cbf09a 100644
--- a/pkg/kube/ready_test.go
+++ b/pkg/kube/ready_test.go
@@ -69,7 +69,7 @@ func Test_ReadyChecker_deploymentReady(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			c := NewReadyChecker(fake.NewSimpleClientset(), nil)
-			if got := c.deploymentReady(tt.args.rs, tt.args.dep); got != tt.want {
+			if got := c.deploymentReady(context.TODO(), tt.args.rs, tt.args.dep); got != tt.want {
 				t.Errorf("deploymentReady() = %v, want %v", got, tt.want)
 			}
 		})
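
A minimal sketch of how the new podDetails helper could be exercised by a unit
test, assuming it were added to pkg/kube/ready_test.go alongside the existing
tests and that corev1 "k8s.io/api/core/v1" is already imported there. The test
name, the ImagePullBackOff fixture, and the image reference are illustrative
assumptions; the patch itself does not add such a test.

// Hypothetical test in package kube (not part of the patch above).
func Test_podDetails_waitingContainer(t *testing.T) {
	// A pod whose only container is stuck waiting on an image pull.
	pod := &corev1.Pod{
		Status: corev1.PodStatus{
			ContainerStatuses: []corev1.ContainerStatus{{
				State: corev1.ContainerState{
					Waiting: &corev1.ContainerStateWaiting{
						Reason:  "ImagePullBackOff",
						Message: `Back-off pulling image "example.invalid/app:1.0"`,
					},
				},
			}},
		},
	}
	// podDetails should surface the waiting reason and message using the
	// " (waiting: ...)" format introduced in this patch.
	want := `ImagePullBackOff (waiting: Back-off pulling image "example.invalid/app:1.0")`
	if got := podDetails(pod); got != want {
		t.Errorf("podDetails() = %q, want %q", got, want)
	}
}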