Merge pull request #5961 from thomastaylor312/fix/wait

fix(pkg/kube): Fixes wait functionality
Taylor Thomas committed via GitHub (commit cf79b90867)

@@ -47,9 +47,12 @@ func (w *waiter) waitForResources(created Result) error {
 	w.log("beginning wait for %d resources with timeout of %v", len(created), w.timeout)
 	return wait.Poll(2*time.Second, w.timeout, func() (bool, error) {
-		for _, v := range created[:0] {
+		for _, v := range created {
 			var (
-				ok  bool
+				// This defaults to true, otherwise we get to a point where
+				// things will always return false unless one of the objects
+				// that manages pods has been hit
+				ok  = true
 				err error
 			)
 			switch value := asVersioned(v).(type) {
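For context, the old loop ranged over created[:0], a zero-length reslice, so the loop body never executed and the poll never actually inspected any resource; and a plain `var ok bool` starts at its zero value (false), so any kind whose case does not set ok would keep the callback returning false. A minimal standalone Go sketch (not Helm code) of those two language behaviors:

package main

import "fmt"

func main() {
	created := []string{"deploy/web", "svc/web", "pvc/data"}

	// Ranging over a zero-length reslice never enters the loop body,
	// which is why the old `for _, v := range created[:0]` waited on nothing.
	for _, v := range created[:0] {
		fmt.Println("checked", v) // never printed
	}

	// The zero value of bool is false, so `var ok bool` left resources
	// that fall through the type switch permanently "not ready".
	var ok bool
	fmt.Println(ok) // false; the fix initializes it with `ok = true`
}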
@@ -128,6 +131,10 @@ func (w *waiter) waitForResources(created Result) error {
 			}
 		case *corev1.ReplicationController:
 			ok, err = w.podsReadyForObject(value.Namespace, value)
+		// TODO(Taylor): This works, but ends up with a possible race
+		// condition if some pods have not been scheduled yet. This logic
+		// should be refactored to do similar checks to what is done for
+		// Deployments
 		case *extensionsv1beta1.DaemonSet:
			ok, err = w.podsReadyForObject(value.Namespace, value)
 		case *appsv1.DaemonSet:
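The race the new TODO describes comes from podsReadyForObject checking the pods that currently exist for the object, so a pod that has not been scheduled yet is simply not there to be checked. The Deployment-style alternative the comment refers to is to compare the controller's own status counters against the desired count. A rough, hypothetical sketch of that idea for a DaemonSet (illustrative only, not Helm's implementation; daemonSetReady is a made-up name):

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
)

// daemonSetReady is an illustrative readiness check in the spirit of the
// TODO above: trust the controller's status counters rather than listing
// pods, so pods that have not been scheduled yet cannot be overlooked.
func daemonSetReady(ds *appsv1.DaemonSet) bool {
	// DesiredNumberScheduled: nodes that should be running the daemon pod.
	// NumberReady: daemon pods that are scheduled and passing readiness probes.
	return ds.Status.NumberReady >= ds.Status.DesiredNumberScheduled
}

func main() {
	ds := &appsv1.DaemonSet{}
	ds.Status.DesiredNumberScheduled = 3
	ds.Status.NumberReady = 2

	fmt.Println(daemonSetReady(ds)) // false: one pod is not ready yet (or not yet scheduled)
}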
@@ -193,22 +200,17 @@ func (w *waiter) serviceReady(s *corev1.Service) bool {
 	if s.Spec.Type == corev1.ServiceTypeExternalName {
 		return true
 	}

 	// Make sure the service is not explicitly set to "None" before checking the IP
-	if s.Spec.ClusterIP != corev1.ClusterIPNone && !isServiceIPSet(s) ||
+	if (s.Spec.ClusterIP != corev1.ClusterIPNone && s.Spec.ClusterIP == "") ||
 		// This checks if the service has a LoadBalancer and that balancer has an Ingress defined
-		s.Spec.Type == corev1.ServiceTypeLoadBalancer && s.Status.LoadBalancer.Ingress == nil {
+		(s.Spec.Type == corev1.ServiceTypeLoadBalancer && s.Status.LoadBalancer.Ingress == nil) {
 		w.log("Service is not ready: %s/%s", s.GetNamespace(), s.GetName())
 		return false
 	}

 	return true
 }

-// isServiceIPSet aims to check if the service's ClusterIP is set or not
-// the objective is not to perform validation here
-func isServiceIPSet(service *corev1.Service) bool {
-	return service.Spec.ClusterIP != corev1.ClusterIPNone && service.Spec.ClusterIP != ""
-}
-
 func (w *waiter) volumeReady(v *corev1.PersistentVolumeClaim) bool {
 	if v.Status.Phase != corev1.ClaimBound {
 		w.log("PersistentVolumeClaim is not ready: %s/%s", v.GetNamespace(), v.GetName())
