fix(kube): Fixes wait for jobs

This is a bug we identified in Helm 3 and had not checked for in Helm 2. The
improved job waiting logic used an automatic retry. However, when creating the
watcher, we were listing everything of the same API version and kind. So if you
had more than one hook and the first one succeeded, the watcher would consider
everything successful. I have validated that the wait now fails as intended
when a job fails.

Closes #6767

Signed-off-by: Taylor Thomas <taylor.thomas@microsoft.com>

@@ -837,7 +837,13 @@ func getSelectorFromObject(obj runtime.Object) (map[string]string, bool) {
 }
 
 func (c *Client) watchUntilReady(timeout time.Duration, info *resource.Info) error {
-	lw := cachetools.NewListWatchFromClient(info.Client, info.Mapping.Resource.Resource, info.Namespace, fields.Everything())
+	// Use a selector on the name of the resource. This should be unique for the
+	// given version and kind
+	selector, err := fields.ParseSelector(fmt.Sprintf("metadata.name=%s", info.Name))
+	if err != nil {
+		return err
+	}
+	lw := cachetools.NewListWatchFromClient(info.Client, info.Mapping.Resource.Resource, info.Namespace, selector)
 	kind := info.Mapping.GroupVersionKind.Kind
 	c.Log("Watching for changes to %s %s with timeout of %v", kind, info.Name, timeout)
@@ -850,7 +856,7 @@ func (c *Client) watchUntilReady(timeout time.Duration, info *resource.Info) error {
 	ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout)
 	defer cancel()
-	_, err := watchtools.ListWatchUntil(ctx, lw, func(e watch.Event) (bool, error) {
+	_, err = watchtools.ListWatchUntil(ctx, lw, func(e watch.Event) (bool, error) {
 		switch e.Type {
 		case watch.Added, watch.Modified:
 			// For things like a secret or a config map, this is the best indicator
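Note the second hunk changes `:=` to `=`: since the new ParseSelector call
already declares err earlier in the function, redeclaring it with `:=` would
not compile, because the blank identifier introduces no new variable. A
hypothetical minimal reproduction, assuming nothing from the Helm codebase:

package main

import (
	"errors"
	"fmt"
)

func get() (int, error) { return 0, errors.New("boom") }

func main() {
	// err is already in scope, as it is in watchUntilReady after the
	// ParseSelector call.
	err := errors.New("declared earlier")

	// _, err := get() // compile error: no new variables on left side of :=
	_, err = get() // plain assignment reuses the existing err

	fmt.Println(err)
}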
