TEST: add option to dump logs during helm test

Signed-off-by: Jeff Knurek <j.knurek@travelaudience.com>
pull/6612/head
Jeff Knurek 6 years ago committed by Matthew Fisher
parent 4e8063b103
commit b94608388e
No known key found for this signature in database
GPG Key ID: 92AA783CBAAE8E3B

@ -352,6 +352,8 @@ message TestReleaseRequest {
bool parallel = 4; bool parallel = 4;
// maximum number of test pods to run in parallel // maximum number of test pods to run in parallel
uint32 max_parallel = 5; uint32 max_parallel = 5;
// logs specifies whether or not to dump the logs from the test pods
bool logs = 6;
} }
// TestReleaseResponse represents a message from executing a test // TestReleaseResponse represents a message from executing a test

@ -41,6 +41,7 @@ type releaseTestCmd struct {
cleanup bool cleanup bool
parallel bool parallel bool
maxParallel uint32 maxParallel uint32
logs bool
} }
func newReleaseTestCmd(c helm.Interface, out io.Writer) *cobra.Command { func newReleaseTestCmd(c helm.Interface, out io.Writer) *cobra.Command {
@ -71,6 +72,7 @@ func newReleaseTestCmd(c helm.Interface, out io.Writer) *cobra.Command {
f.BoolVar(&rlsTest.cleanup, "cleanup", false, "Delete test pods upon completion") f.BoolVar(&rlsTest.cleanup, "cleanup", false, "Delete test pods upon completion")
f.BoolVar(&rlsTest.parallel, "parallel", false, "Run test pods in parallel") f.BoolVar(&rlsTest.parallel, "parallel", false, "Run test pods in parallel")
f.Uint32Var(&rlsTest.maxParallel, "max", 20, "Maximum number of test pods to run in parallel") f.Uint32Var(&rlsTest.maxParallel, "max", 20, "Maximum number of test pods to run in parallel")
f.BoolVar(&rlsTest.logs, "logs", false, "Dump the logs from test pods (this runs after all tests are complete, but before any cleanup")
// set defaults from environment // set defaults from environment
settings.InitTLS(f) settings.InitTLS(f)
@ -85,6 +87,7 @@ func (t *releaseTestCmd) run() (err error) {
helm.ReleaseTestCleanup(t.cleanup), helm.ReleaseTestCleanup(t.cleanup),
helm.ReleaseTestParallel(t.parallel), helm.ReleaseTestParallel(t.parallel),
helm.ReleaseTestMaxParallel(t.maxParallel), helm.ReleaseTestMaxParallel(t.maxParallel),
helm.ReleaseTestLogs(t.logs),
) )
testErr := &testErr{} testErr := &testErr{}

@ -241,6 +241,13 @@ func ReleaseTestMaxParallel(max uint32) ReleaseTestOption {
} }
} }
// ReleaseTestLogs returns a ReleaseTestOption that controls whether the logs
// from the test pods are dumped after the tests finish.
func ReleaseTestLogs(logs bool) ReleaseTestOption {
	return func(o *options) {
		// Propagate the flag into the TestRelease gRPC request.
		o.testReq.Logs = logs
	}
}
// RollbackTimeout specifies the number of seconds before kubernetes calls timeout // RollbackTimeout specifies the number of seconds before kubernetes calls timeout
func RollbackTimeout(timeout int64) RollbackOption { func RollbackTimeout(timeout int64) RollbackOption {
return func(opts *options) { return func(opts *options) {

@ -947,6 +947,24 @@ func (c *Client) watchPodUntilComplete(timeout time.Duration, info *resource.Inf
return err return err
} }
// GetPodLogs takes pod name and namespace and returns the current logs
// (streaming is NOT enabled): the pod's log is read once into memory and
// returned as a single string.
func (c *Client) GetPodLogs(name, ns string) (string, error) {
	// Bug fix: the clientset error was previously discarded with `_`, which
	// would nil-panic below if the clientset could not be constructed.
	client, err := c.KubernetesClientSet()
	if err != nil {
		return "", err
	}
	req := client.CoreV1().Pods(ns).GetLogs(name, &v1.PodLogOptions{})
	podLogs, err := req.Stream()
	if err != nil {
		return "", fmt.Errorf("error in opening log stream, got: %s", err)
	}
	defer podLogs.Close()
	// Drain the stream into a buffer; logs are assumed small enough to hold
	// in memory (no follow/streaming requested above).
	buf := new(bytes.Buffer)
	if _, err := io.Copy(buf, podLogs); err != nil {
		return "", fmt.Errorf("error in copying log stream to buffer, got: %s", err)
	}
	return buf.String(), nil
}
func isPodComplete(event watch.Event) (bool, error) { func isPodComplete(event watch.Event) (bool, error) {
o, ok := event.Object.(*v1.Pod) o, ok := event.Object.(*v1.Pod)
if !ok { if !ok {

@ -96,7 +96,7 @@ func (env *Environment) streamError(info string) error {
} }
func (env *Environment) streamFailed(name string) error { func (env *Environment) streamFailed(name string) error {
msg := fmt.Sprintf("FAILED: %s, run `kubectl logs %s --namespace %s` for more info", name, name, env.Namespace) msg := "FAILED: " + name
return env.streamMessage(msg, release.TestRun_FAILURE) return env.streamMessage(msg, release.TestRun_FAILURE)
} }
@ -126,3 +126,21 @@ func (env *Environment) DeleteTestPods(testManifests []string) {
} }
} }
} }
// GetLogs fetches and streams the logs of the first pod of each test
// manifest to the client. Errors for an individual manifest are streamed
// as messages and do not abort the remaining manifests.
func (env *Environment) GetLogs(testManifests []string) {
	for _, testManifest := range testManifests {
		infos, err := env.KubeClient.Build(env.Namespace, bytes.NewBufferString(testManifest))
		if err != nil {
			env.streamError(err.Error())
			continue
		}
		// Bug fix: guard against an empty build result and a non-Pod object;
		// the previous code would panic on `infos[0]` / the type assertion.
		if len(infos) == 0 {
			env.streamError("could not find pod in test manifest")
			continue
		}
		pod, ok := infos[0].Object.(*v1.Pod)
		if !ok {
			env.streamError("test manifest object is not a pod")
			continue
		}
		logs, err := env.KubeClient.GetPodLogs(pod.Name, env.Namespace)
		if err != nil {
			env.streamError(err.Error())
			continue
		}
		msg := fmt.Sprintf("\nPOD LOGS: %s\n%s", pod.Name, logs)
		env.streamMessage(msg, release.TestRun_UNKNOWN)
	}
}

@ -175,6 +175,8 @@ type KubeClient interface {
// and returns said phase (PodSucceeded or PodFailed qualify). // and returns said phase (PodSucceeded or PodFailed qualify).
WaitAndGetCompletedPodPhase(namespace string, reader io.Reader, timeout time.Duration) (v1.PodPhase, error) WaitAndGetCompletedPodPhase(namespace string, reader io.Reader, timeout time.Duration) (v1.PodPhase, error)
GetPodLogs(name, namespace string) (string, error)
WaitUntilCRDEstablished(reader io.Reader, timeout time.Duration) error WaitUntilCRDEstablished(reader io.Reader, timeout time.Duration) error
} }
@ -255,6 +257,11 @@ func (p *PrintingKubeClient) WaitAndGetCompletedPodPhase(namespace string, reade
return v1.PodUnknown, err return v1.PodUnknown, err
} }
// GetPodLogs implements KubeClient GetPodLogs.
// The printing client has no cluster to query, so it always reports empty logs.
func (p *PrintingKubeClient) GetPodLogs(name, namespace string) (string, error) {
	return "", nil
}
// WaitUntilCRDEstablished implements KubeClient WaitUntilCRDEstablished. // WaitUntilCRDEstablished implements KubeClient WaitUntilCRDEstablished.
func (p *PrintingKubeClient) WaitUntilCRDEstablished(reader io.Reader, timeout time.Duration) error { func (p *PrintingKubeClient) WaitUntilCRDEstablished(reader io.Reader, timeout time.Duration) error {
_, err := io.Copy(p.Out, reader) _, err := io.Copy(p.Out, reader)

@ -69,6 +69,10 @@ func (s *ReleaseServer) RunReleaseTest(req *services.TestReleaseRequest, stream
Results: tSuite.Results, Results: tSuite.Results,
} }
if req.Logs {
testEnv.GetLogs(tSuite.TestManifests)
}
if req.Cleanup { if req.Cleanup {
testEnv.DeleteTestPods(tSuite.TestManifests) testEnv.DeleteTestPods(tSuite.TestManifests)
} }

Loading…
Cancel
Save