@@ -220,6 +220,72 @@ func TestExtractTestManifestsFromHooks(t *testing.T) {
 	}
 }
 
+func TestParallelTestRun(t *testing.T) {
+	ts := testSuiteFixture([]string{manifestWithTestSuccessHook, manifestWithTestSuccessHook})
+	env := testEnvFixture()
+	env.Parallel = true
+	env.KubeClient = newSleepOnWaitKubeClient()
+	if err := ts.Run(env); err != nil {
+		t.Errorf("%s", err)
+	}
+
+	if len(ts.Results) != 2 {
+		t.Errorf("Expected 2 test result. Got %v", len(ts.Results))
+	}
+
+	stream := env.Stream.(*mockStream)
+	if len(stream.messages) != 4 {
+		t.Errorf("Expected four messages, Got: %v", len(stream.messages))
+	}
+
+	if stream.messages[0].Status != release.TestRun_RUNNING {
+		t.Errorf("Expected first message status to be RUNNING, Got: %v", stream.messages[0].Status)
+	}
+	if stream.messages[1].Status != release.TestRun_RUNNING {
+		t.Errorf("Expected second message status to be RUNNING, Got: %v", stream.messages[1].Status)
+	}
+	if stream.messages[2].Status != release.TestRun_SUCCESS {
+		t.Errorf("Expected third message status to be SUCCESS, Got: %v", stream.messages[2].Status)
+	}
+	if stream.messages[3].Status != release.TestRun_SUCCESS {
+		t.Errorf("Expected fourth message status to be SUCCESS, Got: %v", stream.messages[3].Status)
+	}
+}
+
+func TestParallelTestRunFailure(t *testing.T) {
+	ts := testSuiteFixture([]string{manifestWithTestSuccessHook, manifestWithTestFailureHook})
+	env := testEnvFixture()
+	env.Parallel = true
+	env.KubeClient = newSleepOnWaitKubeClient()
+	if err := ts.Run(env); err != nil {
+		t.Errorf("%s", err)
+	}
+
+	if len(ts.Results) != 2 {
+		t.Errorf("Expected 2 test result. Got %v", len(ts.Results))
+	}
+
+	stream := env.Stream.(*mockStream)
+	if len(stream.messages) != 4 {
+		t.Errorf("Expected four messages, Got: %v", len(stream.messages))
+	}
+
+	if stream.messages[0].Status != release.TestRun_RUNNING {
+		t.Errorf("Expected first message status to be RUNNING, Got: %v", stream.messages[0].Status)
+	}
+	if stream.messages[1].Status != release.TestRun_RUNNING {
+		t.Errorf("Expected second message status to be RUNNING, Got: %v", stream.messages[1].Status)
+	}
+
+	if ts.Results[0].Status != release.TestRun_SUCCESS {
+		t.Errorf("Expected first test result to be successful, got: %v", ts.Results[0].Status)
+	}
+
+	if ts.Results[1].Status != release.TestRun_FAILURE {
+		t.Errorf("Expected second test result to be failure, got: %v", ts.Results[1].Status)
+	}
+}
+
 func chartStub() *chart.Chart {
 	return &chart.Chart{
 		Metadata: &chart.Metadata{
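The four-message assertions above can only expect both RUNNING messages ahead of both results if the suite starts every test before it collects any result. A minimal sketch of that dispatch pattern, assuming a hypothetical `stream` channel and per-test `run` functions (neither is Tiller's actual API; the real `env.Stream` is a gRPC stream), might look like:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// msg is a hypothetical stand-in for a release.TestRun status message.
type msg struct{ status string }

// runParallel starts one goroutine per test. Each goroutine publishes
// RUNNING before doing any work, so with test bodies that block for a
// moment (like the 1ms sleep in sleepOnWaitKubeClient in the next hunk),
// all RUNNING messages land on the stream before any result does.
func runParallel(tests []func() string, stream chan<- msg) {
	var wg sync.WaitGroup
	for _, run := range tests {
		wg.Add(1)
		go func(run func() string) {
			defer wg.Done()
			stream <- msg{status: "RUNNING"}
			stream <- msg{status: run()}
		}(run)
	}
	wg.Wait()
	close(stream)
}

func main() {
	sleepyPass := func() string {
		time.Sleep(time.Millisecond) // mimics sleepOnWaitKubeClient
		return "SUCCESS"
	}
	stream := make(chan msg, 4) // buffered: results are drained after wg.Wait()
	runParallel([]func() string{sleepyPass, sleepyPass}, stream)
	for m := range stream {
		fmt.Println(m.status) // typically RUNNING, RUNNING, SUCCESS, SUCCESS
	}
}
```

The buffered channel keeps the sketch deadlock-free even though the stream is read only after `wg.Wait()`; treat it purely as an illustration of the message ordering the tests assert.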
@@ -328,6 +394,26 @@ func (p *podSucceededKubeClient) WaitAndGetCompletedPodPhase(ns string, r io.Rea
 	return v1.PodSucceeded, nil
 }
 
+// For testing parallelism, this kube client
+// will sleep for 1ms before returning completed pod
+// phase.
+type sleepOnWaitKubeClient struct {
+	tillerEnv.PrintingKubeClient
+	firstWait bool
+}
+
+func newSleepOnWaitKubeClient() *sleepOnWaitKubeClient {
+	return &sleepOnWaitKubeClient{
+		PrintingKubeClient: tillerEnv.PrintingKubeClient{Out: ioutil.Discard},
+	}
+}
+
+func (p *sleepOnWaitKubeClient) WaitAndGetCompletedPodPhase(ns string, r io.Reader, timeout time.Duration) (core.PodPhase, error) {
+	time.Sleep(1 * time.Millisecond)
+
+	return core.PodSucceeded, nil
+}
+
 type podFailedKubeClient struct {
 	tillerEnv.PrintingKubeClient
 }
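The 1ms sleep in sleepOnWaitKubeClient is the only synchronization the parallel tests rely on: it parks each test inside WaitAndGetCompletedPodPhase long enough for the other goroutine to stream its RUNNING message, which is what makes the RUNNING, RUNNING, result, result ordering asserted above dependable rather than a scheduler accident. A sequential run against the same client would presumably interleave as RUNNING, result, RUNNING, result instead.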