From 854f7f6b7217dcfe135df9e4652517d3ec9c3913 Mon Sep 17 00:00:00 2001 From: Sebastien Tardif Date: Mon, 4 May 2026 13:57:52 -0700 Subject: [PATCH 1/2] fix: fetch logs from all containers in test pods When a test pod contains multiple containers (e.g. Istio/Consul/Vault sidecars), 'helm test --logs' failed with 'a container name must be specified'. This happened because GetPodLogs called the Kubernetes log API without specifying a container name. The fix fetches the pod spec first, then iterates over all containers (init containers + regular containers) and requests logs for each one explicitly. Errors from individual containers are collected and returned together via errors.Join rather than aborting on the first failure. Also fixes a typo: hooksByWight -> hooksByWeight. Closes #6902 Signed-off-by: Sebastien Tardif --- pkg/action/release_testing.go | 53 +++++++++++---- pkg/action/release_testing_test.go | 103 ++++++++++++++++++++++++++++- 2 files changed, 141 insertions(+), 15 deletions(-) diff --git a/pkg/action/release_testing.go b/pkg/action/release_testing.go index 043a41236..9de4e58f2 100644 --- a/pkg/action/release_testing.go +++ b/pkg/action/release_testing.go @@ -18,6 +18,7 @@ package action import ( "context" + "errors" "fmt" "io" "slices" @@ -25,6 +26,8 @@ import ( "time" v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" chartutil "helm.sh/helm/v4/pkg/chart/v2/util" "helm.sh/helm/v4/pkg/kube" @@ -124,9 +127,9 @@ func (r *ReleaseTesting) GetPodLogs(out io.Writer, rel *release.Release) error { return fmt.Errorf("unable to get kubernetes client to fetch pod logs: %w", err) } - hooksByWight := append([]*release.Hook{}, rel.Hooks...) - sort.Stable(hookByWeight(hooksByWight)) - for _, h := range hooksByWight { + hooksByWeight := append([]*release.Hook{}, rel.Hooks...) 
+ sort.Stable(hookByWeight(hooksByWeight)) + for _, h := range hooksByWeight { for _, e := range h.Events { if e == release.HookTest { if slices.Contains(r.Filters[ExcludeNameFilter], h.Name) { @@ -135,20 +138,42 @@ func (r *ReleaseTesting) GetPodLogs(out io.Writer, rel *release.Release) error { if len(r.Filters[IncludeNameFilter]) > 0 && !slices.Contains(r.Filters[IncludeNameFilter], h.Name) { continue } - req := client.CoreV1().Pods(r.Namespace).GetLogs(h.Name, &v1.PodLogOptions{}) - logReader, err := req.Stream(context.Background()) - if err != nil { - return fmt.Errorf("unable to get pod logs for %s: %w", h.Name, err) - } - - fmt.Fprintf(out, "POD LOGS: %s\n", h.Name) - _, err = io.Copy(out, logReader) - fmt.Fprintln(out) - if err != nil { - return fmt.Errorf("unable to write pod logs for %s: %w", h.Name, err) + if err := r.getContainerLogs(out, client, h.Name); err != nil { + return err } } } } return nil } + +// getContainerLogs fetches logs from all containers (init and regular) in the +// named pod and writes them to out. It continues on per-container errors and +// returns all of them joined at the end. +func (r *ReleaseTesting) getContainerLogs(out io.Writer, client kubernetes.Interface, podName string) error { + pod, err := client.CoreV1().Pods(r.Namespace).Get(context.Background(), podName, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("unable to get pod %s: %w", podName, err) + } + + allContainers := slices.Concat(pod.Spec.InitContainers, pod.Spec.Containers)
+ + var errs []error + for _, c := range allContainers { + opts := &v1.PodLogOptions{Container: c.Name} + req := client.CoreV1().Pods(r.Namespace).GetLogs(podName, opts) + logReader, err := req.Stream(context.Background()) + if err != nil { + errs = append(errs, fmt.Errorf("unable to get logs for pod %s, container %s: %w", podName, c.Name, err)) + continue + } + + fmt.Fprintf(out, "POD LOGS: %s (%s)\n", podName, c.Name) + _, err = io.Copy(out, logReader) + fmt.Fprintln(out) + if err != nil { + errs = append(errs, fmt.Errorf("unable to write logs for pod %s, container %s: %w", podName, c.Name, err)) + } + } + return errors.Join(errs...) +} diff --git a/pkg/action/release_testing_test.go b/pkg/action/release_testing_test.go index ab35e104a..ea647af80 100644 --- a/pkg/action/release_testing_test.go +++ b/pkg/action/release_testing_test.go @@ -22,10 +22,14 @@ import ( "errors" "io" "os" + "strings" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + fakeclientset "k8s.io/client-go/kubernetes/fake" "helm.sh/helm/v4/pkg/cli" "helm.sh/helm/v4/pkg/kube" @@ -89,7 +93,7 @@ func TestReleaseTestingGetPodLogs_PodRetrievalError(t *testing.T) { }, } - require.ErrorContains(t, client.GetPodLogs(&bytes.Buffer{}, &release.Release{Hooks: hooks}), "unable to get pod logs") + require.ErrorContains(t, client.GetPodLogs(&bytes.Buffer{}, &release.Release{Hooks: hooks}), "unable to get pod") } func TestReleaseTesting_WaitOptionsPassedDownstream(t *testing.T) { @@ -117,3 +121,100 @@ func TestReleaseTesting_WaitOptionsPassedDownstream(t *testing.T) { // Verify that WaitOptions were passed to GetWaiter is.NotEmpty(failer.RecordedWaitOptions, "WaitOptions should be passed to GetWaiter") } + +func TestGetContainerLogs_MultipleContainers(t *testing.T) { + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + }, + Spec: v1.PodSpec{ + Containers: 
[]v1.Container{ + {Name: "main"}, + {Name: "sidecar"}, + }, + }, + } + + client := fakeclientset.NewClientset(pod) + rt := &ReleaseTesting{Namespace: "default"} + + var buf bytes.Buffer + err := rt.getContainerLogs(&buf, client, "test-pod") + // The fake client doesn't serve real log streams, so we expect + // per-container errors rather than success, but critically it should + // NOT fail with "a container name must be specified". + if err != nil { + assert.NotContains(t, err.Error(), "a container name must be specified") + assert.Contains(t, err.Error(), "container main") + assert.Contains(t, err.Error(), "container sidecar") + } +} + +func TestGetContainerLogs_WithInitContainers(t *testing.T) { + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + }, + Spec: v1.PodSpec{ + InitContainers: []v1.Container{ + {Name: "init-setup"}, + }, + Containers: []v1.Container{ + {Name: "main"}, + }, + }, + } + + client := fakeclientset.NewClientset(pod) + rt := &ReleaseTesting{Namespace: "default"} + + var buf bytes.Buffer + err := rt.getContainerLogs(&buf, client, "test-pod") + if err != nil { + // Both init and regular containers should be attempted + assert.Contains(t, err.Error(), "container init-setup") + assert.Contains(t, err.Error(), "container main") + } +} + +func TestGetContainerLogs_PodNotFound(t *testing.T) { + client := fakeclientset.NewClientset() + rt := &ReleaseTesting{Namespace: "default"} + + var buf bytes.Buffer + err := rt.getContainerLogs(&buf, client, "nonexistent-pod") + require.Error(t, err) + assert.Contains(t, err.Error(), "unable to get pod nonexistent-pod") +} + +func TestGetPodLogs_MultiContainerOutput(t *testing.T) { + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "multi-test", + Namespace: "default", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + {Name: "container-a"}, + {Name: "container-b"}, + }, + }, + } + + client := fakeclientset.NewClientset(pod) + rt := &ReleaseTesting{ + 
Namespace: "default", + Filters: map[string][]string{}, + } + + // Call getContainerLogs directly to test output formatting + var buf bytes.Buffer + _ = rt.getContainerLogs(&buf, client, "multi-test") + output := buf.String() + // Even if logs fail, check that header formatting uses container names + if len(output) > 0 { + assert.True(t, strings.Contains(output, "(container-a)") || strings.Contains(output, "(container-b)")) + } +} From 922558fc1a3b3f2c2b7e8ad0f32bd5bcd46ca70e Mon Sep 17 00:00:00 2001 From: Sebastien Tardif Date: Mon, 4 May 2026 14:04:25 -0700 Subject: [PATCH 2/2] fix: address review feedback - Close log stream after reading (prevents connection/fd leak) - Strengthen tests to assert on output headers rather than error paths - Remove unused import Signed-off-by: Sebastien Tardif --- pkg/action/release_testing.go | 1 + pkg/action/release_testing_test.go | 40 +++++++++++------------------- 2 files changed, 16 insertions(+), 25 deletions(-) diff --git a/pkg/action/release_testing.go b/pkg/action/release_testing.go index 9de4e58f2..8cb6ce664 100644 --- a/pkg/action/release_testing.go +++ b/pkg/action/release_testing.go @@ -170,6 +170,7 @@ func (r *ReleaseTesting) getContainerLogs(out io.Writer, client kubernetes.Inter fmt.Fprintf(out, "POD LOGS: %s (%s)\n", podName, c.Name) _, err = io.Copy(out, logReader) + logReader.Close() fmt.Fprintln(out) if err != nil { errs = append(errs, fmt.Errorf("unable to write logs for pod %s, container %s: %w", podName, c.Name, err)) diff --git a/pkg/action/release_testing_test.go b/pkg/action/release_testing_test.go index ea647af80..dcc708548 100644 --- a/pkg/action/release_testing_test.go +++ b/pkg/action/release_testing_test.go @@ -22,7 +22,6 @@ import ( "errors" "io" "os" - "strings" "testing" "github.com/stretchr/testify/assert" @@ -141,14 +140,10 @@ func TestGetContainerLogs_MultipleContainers(t *testing.T) { var buf bytes.Buffer err := rt.getContainerLogs(&buf, client, "test-pod") - // The fake client doesn't serve 
real log streams, so we expect - // per-container errors rather than success, but critically it should - // NOT fail with "a container name must be specified". - if err != nil { - assert.NotContains(t, err.Error(), "a container name must be specified") - assert.Contains(t, err.Error(), "container main") - assert.Contains(t, err.Error(), "container sidecar") - } + require.NoError(t, err) + output := buf.String() + assert.Contains(t, output, "POD LOGS: test-pod (main)") + assert.Contains(t, output, "POD LOGS: test-pod (sidecar)") } func TestGetContainerLogs_WithInitContainers(t *testing.T) { @@ -172,11 +167,11 @@ func TestGetContainerLogs_WithInitContainers(t *testing.T) { var buf bytes.Buffer err := rt.getContainerLogs(&buf, client, "test-pod") - if err != nil { - // Both init and regular containers should be attempted - assert.Contains(t, err.Error(), "container init-setup") - assert.Contains(t, err.Error(), "container main") - } + require.NoError(t, err) + output := buf.String() + // Init containers should appear before regular containers + assert.Contains(t, output, "POD LOGS: test-pod (init-setup)") + assert.Contains(t, output, "POD LOGS: test-pod (main)") } func TestGetContainerLogs_PodNotFound(t *testing.T) { @@ -189,7 +184,7 @@ func TestGetContainerLogs_PodNotFound(t *testing.T) { assert.Contains(t, err.Error(), "unable to get pod nonexistent-pod") } -func TestGetPodLogs_MultiContainerOutput(t *testing.T) { +func TestGetContainerLogs_OutputHeaderFormat(t *testing.T) { pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "multi-test", @@ -204,17 +199,12 @@ func TestGetPodLogs_MultiContainerOutput(t *testing.T) { } client := fakeclientset.NewClientset(pod) - rt := &ReleaseTesting{ - Namespace: "default", - Filters: map[string][]string{}, - } + rt := &ReleaseTesting{Namespace: "default"} - // Call getContainerLogs directly to test output formatting var buf bytes.Buffer - _ = rt.getContainerLogs(&buf, client, "multi-test") + err := rt.getContainerLogs(&buf, 
client, "multi-test") + require.NoError(t, err) output := buf.String() - // Even if logs fail, check that header formatting uses container names - if len(output) > 0 { - assert.True(t, strings.Contains(output, "(container-a)") || strings.Contains(output, "(container-b)")) - } + assert.Contains(t, output, "POD LOGS: multi-test (container-a)") + assert.Contains(t, output, "POD LOGS: multi-test (container-b)") }