mirror of https://github.com/helm/helm
Merge pull request #8363 from zh168654/master
helm upgrade with --wait supports jobs in manifest to be completed (pull/9139/head)
commit
f5ef87b96e
@ -0,0 +1,6 @@
|
||||
NAME: apollo
|
||||
LAST DEPLOYED: Fri Sep 2 22:04:05 1977
|
||||
NAMESPACE: default
|
||||
STATUS: deployed
|
||||
REVISION: 1
|
||||
TEST SUITE: None
|
@ -0,0 +1 @@
|
||||
Rollback was a success! Happy Helming!
|
@ -0,0 +1,7 @@
|
||||
Release "crazy-bunny" has been upgraded. Happy Helming!
|
||||
NAME: crazy-bunny
|
||||
LAST DEPLOYED: Fri Sep 2 22:04:05 1977
|
||||
NAMESPACE: default
|
||||
STATUS: deployed
|
||||
REVISION: 3
|
||||
TEST SUITE: None
|
@ -0,0 +1,515 @@
|
||||
/*
|
||||
Copyright The Helm Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package kube // import "helm.sh/helm/v3/pkg/kube"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
batchv1 "k8s.io/api/batch/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
)
|
||||
|
||||
// defaultNamespace is the namespace all test fixtures are created in.
const defaultNamespace = metav1.NamespaceDefault
|
||||
|
||||
func Test_waiter_deploymentReady(t *testing.T) {
|
||||
type args struct {
|
||||
rs *appsv1.ReplicaSet
|
||||
dep *appsv1.Deployment
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "deployment is ready",
|
||||
args: args{
|
||||
rs: newReplicaSet("foo", 1, 1),
|
||||
dep: newDeployment("foo", 1, 1, 0),
|
||||
},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "deployment is not ready",
|
||||
args: args{
|
||||
rs: newReplicaSet("foo", 0, 0),
|
||||
dep: newDeployment("foo", 1, 1, 0),
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "deployment is ready when maxUnavailable is set",
|
||||
args: args{
|
||||
rs: newReplicaSet("foo", 2, 1),
|
||||
dep: newDeployment("foo", 2, 1, 1),
|
||||
},
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
w := &waiter{
|
||||
c: fake.NewSimpleClientset(),
|
||||
log: nopLogger,
|
||||
}
|
||||
if got := w.deploymentReady(tt.args.rs, tt.args.dep); got != tt.want {
|
||||
t.Errorf("deploymentReady() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_waiter_daemonSetReady(t *testing.T) {
|
||||
type args struct {
|
||||
ds *appsv1.DaemonSet
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "daemonset is ready",
|
||||
args: args{
|
||||
ds: newDaemonSet("foo", 0, 1, 1, 1),
|
||||
},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "daemonset is not ready",
|
||||
args: args{
|
||||
ds: newDaemonSet("foo", 0, 0, 1, 1),
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "daemonset pods have not been scheduled successfully",
|
||||
args: args{
|
||||
ds: newDaemonSet("foo", 0, 0, 1, 0),
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "daemonset is ready when maxUnavailable is set",
|
||||
args: args{
|
||||
ds: newDaemonSet("foo", 1, 1, 2, 2),
|
||||
},
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
w := &waiter{
|
||||
c: fake.NewSimpleClientset(),
|
||||
log: nopLogger,
|
||||
}
|
||||
if got := w.daemonSetReady(tt.args.ds); got != tt.want {
|
||||
t.Errorf("daemonSetReady() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_waiter_statefulSetReady(t *testing.T) {
|
||||
type args struct {
|
||||
sts *appsv1.StatefulSet
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "statefulset is ready",
|
||||
args: args{
|
||||
sts: newStatefulSet("foo", 1, 0, 1, 1),
|
||||
},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "statefulset is not ready",
|
||||
args: args{
|
||||
sts: newStatefulSet("foo", 1, 0, 0, 1),
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "statefulset is ready when partition is specified",
|
||||
args: args{
|
||||
sts: newStatefulSet("foo", 2, 1, 2, 1),
|
||||
},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "statefulset is not ready when partition is set",
|
||||
args: args{
|
||||
sts: newStatefulSet("foo", 1, 1, 1, 1),
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
w := &waiter{
|
||||
c: fake.NewSimpleClientset(),
|
||||
log: nopLogger,
|
||||
}
|
||||
if got := w.statefulSetReady(tt.args.sts); got != tt.want {
|
||||
t.Errorf("statefulSetReady() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_waiter_podsReadyForObject(t *testing.T) {
|
||||
type args struct {
|
||||
namespace string
|
||||
obj runtime.Object
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
existPods []corev1.Pod
|
||||
want bool
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "pods ready for a replicaset",
|
||||
args: args{
|
||||
namespace: defaultNamespace,
|
||||
obj: newReplicaSet("foo", 1, 1),
|
||||
},
|
||||
existPods: []corev1.Pod{
|
||||
*newPodWithCondition("foo", corev1.ConditionTrue),
|
||||
},
|
||||
want: true,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "pods not ready for a replicaset",
|
||||
args: args{
|
||||
namespace: defaultNamespace,
|
||||
obj: newReplicaSet("foo", 1, 1),
|
||||
},
|
||||
existPods: []corev1.Pod{
|
||||
*newPodWithCondition("foo", corev1.ConditionFalse),
|
||||
},
|
||||
want: false,
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
w := &waiter{
|
||||
c: fake.NewSimpleClientset(),
|
||||
log: nopLogger,
|
||||
}
|
||||
for _, pod := range tt.existPods {
|
||||
if _, err := w.c.CoreV1().Pods(defaultNamespace).Create(context.TODO(), &pod, metav1.CreateOptions{}); err != nil {
|
||||
t.Errorf("Failed to create Pod error: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
got, err := w.podsReadyForObject(tt.args.namespace, tt.args.obj)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("podsReadyForObject() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if got != tt.want {
|
||||
t.Errorf("podsReadyForObject() got = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_waiter_jobReady(t *testing.T) {
|
||||
type args struct {
|
||||
job *batchv1.Job
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "job is completed",
|
||||
args: args{job: newJob("foo", 1, 1, 1, 0)},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "job is incomplete",
|
||||
args: args{job: newJob("foo", 1, 1, 0, 0)},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "job is failed",
|
||||
args: args{job: newJob("foo", 1, 1, 0, 1)},
|
||||
want: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
w := &waiter{
|
||||
c: fake.NewSimpleClientset(),
|
||||
log: nopLogger,
|
||||
}
|
||||
if got := w.jobReady(tt.args.job); got != tt.want {
|
||||
t.Errorf("jobReady() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_waiter_volumeReady(t *testing.T) {
|
||||
type args struct {
|
||||
v *corev1.PersistentVolumeClaim
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "pvc is bound",
|
||||
args: args{
|
||||
v: newPersistentVolumeClaim("foo", corev1.ClaimBound),
|
||||
},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "pvc is not ready",
|
||||
args: args{
|
||||
v: newPersistentVolumeClaim("foo", corev1.ClaimPending),
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
w := &waiter{
|
||||
c: fake.NewSimpleClientset(),
|
||||
log: nopLogger,
|
||||
}
|
||||
if got := w.volumeReady(tt.args.v); got != tt.want {
|
||||
t.Errorf("volumeReady() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func newDaemonSet(name string, maxUnavailable, numberReady, desiredNumberScheduled, updatedNumberScheduled int) *appsv1.DaemonSet {
|
||||
return &appsv1.DaemonSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: defaultNamespace,
|
||||
},
|
||||
Spec: appsv1.DaemonSetSpec{
|
||||
UpdateStrategy: appsv1.DaemonSetUpdateStrategy{
|
||||
Type: appsv1.RollingUpdateDaemonSetStrategyType,
|
||||
RollingUpdate: &appsv1.RollingUpdateDaemonSet{
|
||||
MaxUnavailable: func() *intstr.IntOrString { i := intstr.FromInt(maxUnavailable); return &i }(),
|
||||
},
|
||||
},
|
||||
Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"name": name}},
|
||||
Template: corev1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: map[string]string{"name": name},
|
||||
},
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Image: "nginx",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Status: appsv1.DaemonSetStatus{
|
||||
DesiredNumberScheduled: int32(desiredNumberScheduled),
|
||||
NumberReady: int32(numberReady),
|
||||
UpdatedNumberScheduled: int32(updatedNumberScheduled),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func newStatefulSet(name string, replicas, partition, readyReplicas, updatedReplicas int) *appsv1.StatefulSet {
|
||||
return &appsv1.StatefulSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: defaultNamespace,
|
||||
},
|
||||
Spec: appsv1.StatefulSetSpec{
|
||||
UpdateStrategy: appsv1.StatefulSetUpdateStrategy{
|
||||
Type: appsv1.RollingUpdateStatefulSetStrategyType,
|
||||
RollingUpdate: &appsv1.RollingUpdateStatefulSetStrategy{
|
||||
Partition: intToInt32(partition),
|
||||
},
|
||||
},
|
||||
Replicas: intToInt32(replicas),
|
||||
Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"name": name}},
|
||||
Template: corev1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: map[string]string{"name": name},
|
||||
},
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Image: "nginx",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Status: appsv1.StatefulSetStatus{
|
||||
UpdatedReplicas: int32(updatedReplicas),
|
||||
ReadyReplicas: int32(readyReplicas),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func newDeployment(name string, replicas, maxSurge, maxUnavailable int) *appsv1.Deployment {
|
||||
return &appsv1.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: defaultNamespace,
|
||||
},
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Strategy: appsv1.DeploymentStrategy{
|
||||
Type: appsv1.RollingUpdateDeploymentStrategyType,
|
||||
RollingUpdate: &appsv1.RollingUpdateDeployment{
|
||||
MaxUnavailable: func() *intstr.IntOrString { i := intstr.FromInt(maxUnavailable); return &i }(),
|
||||
MaxSurge: func() *intstr.IntOrString { i := intstr.FromInt(maxSurge); return &i }(),
|
||||
},
|
||||
},
|
||||
Replicas: intToInt32(replicas),
|
||||
Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"name": name}},
|
||||
Template: corev1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: map[string]string{"name": name},
|
||||
},
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Image: "nginx",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func newReplicaSet(name string, replicas int, readyReplicas int) *appsv1.ReplicaSet {
|
||||
d := newDeployment(name, replicas, 0, 0)
|
||||
return &appsv1.ReplicaSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: defaultNamespace,
|
||||
Labels: d.Spec.Selector.MatchLabels,
|
||||
OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(d, d.GroupVersionKind())},
|
||||
},
|
||||
Spec: appsv1.ReplicaSetSpec{
|
||||
Selector: d.Spec.Selector,
|
||||
Replicas: intToInt32(replicas),
|
||||
Template: d.Spec.Template,
|
||||
},
|
||||
Status: appsv1.ReplicaSetStatus{
|
||||
ReadyReplicas: int32(readyReplicas),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func newPodWithCondition(name string, podReadyCondition corev1.ConditionStatus) *corev1.Pod {
|
||||
return &corev1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: defaultNamespace,
|
||||
Labels: map[string]string{"name": name},
|
||||
},
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Image: "nginx",
|
||||
},
|
||||
},
|
||||
},
|
||||
Status: corev1.PodStatus{
|
||||
Conditions: []corev1.PodCondition{
|
||||
{
|
||||
Type: corev1.PodReady,
|
||||
Status: podReadyCondition,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func newPersistentVolumeClaim(name string, phase corev1.PersistentVolumeClaimPhase) *corev1.PersistentVolumeClaim {
|
||||
return &corev1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: defaultNamespace,
|
||||
},
|
||||
Status: corev1.PersistentVolumeClaimStatus{
|
||||
Phase: phase,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func newJob(name string, backoffLimit, completions, succeeded, failed int) *batchv1.Job {
|
||||
return &batchv1.Job{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: defaultNamespace,
|
||||
},
|
||||
Spec: batchv1.JobSpec{
|
||||
BackoffLimit: intToInt32(backoffLimit),
|
||||
Completions: intToInt32(completions),
|
||||
Template: corev1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: map[string]string{"name": name},
|
||||
},
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Image: "nginx",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Status: batchv1.JobStatus{
|
||||
Succeeded: int32(succeeded),
|
||||
Failed: int32(failed),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// intToInt32 converts i to int32 and returns a pointer to the converted value.
func intToInt32(i int) *int32 {
	v := int32(i)
	return &v
}
|
Loading…
Reference in new issue