diff --git a/go.mod b/go.mod
index feefb8932..e70781ac5 100644
--- a/go.mod
+++ b/go.mod
@@ -47,6 +47,7 @@ require (
 	k8s.io/kubectl v0.31.3
 	oras.land/oras-go v1.2.5
 	sigs.k8s.io/cli-utils v0.37.2
+	sigs.k8s.io/controller-runtime v0.18.4
 	sigs.k8s.io/yaml v1.4.0
 )
 
@@ -128,6 +129,7 @@ require (
 	github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
+	github.com/onsi/gomega v1.33.1 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
 	github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
@@ -185,7 +187,6 @@ require (
 	k8s.io/component-base v0.31.3 // indirect
 	k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
 	k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect
-	sigs.k8s.io/controller-runtime v0.18.4 // indirect
 	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
 	sigs.k8s.io/kustomize/api v0.17.2 // indirect
 	sigs.k8s.io/kustomize/kyaml v0.17.1 // indirect
diff --git a/pkg/kube/client.go b/pkg/kube/client.go
index b1b1d4835..149017b17 100644
--- a/pkg/kube/client.go
+++ b/pkg/kube/client.go
@@ -37,12 +37,7 @@ import (
 	apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/runtime"
-	"sigs.k8s.io/cli-utils/pkg/kstatus/polling/aggregator"
-	"sigs.k8s.io/cli-utils/pkg/kstatus/polling/collector"
-	"sigs.k8s.io/cli-utils/pkg/kstatus/polling/event"
-	"sigs.k8s.io/cli-utils/pkg/kstatus/status"
 	"sigs.k8s.io/cli-utils/pkg/kstatus/watcher"
-	"sigs.k8s.io/cli-utils/pkg/object"
 	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
 
 	multierror "github.com/hashicorp/go-multierror"
@@ -92,10 +87,12 @@ type Client struct {
 	Namespace string
 
 	kubeClient *kubernetes.Clientset
-	// Another potential option rather than having the waiter as a field
-	// would be to have a field that decides what type of waiter to use
-	// then instantiate it during the method
-	// of course the fields could take a waiter as well
+	// There are a few options for how the waiter could be handled:
+	// - instantiate it in New, or at the start of each wait method;
+	// - keep it entirely separate from the Client interface, though that
+	//   forces consumers to juggle a second interface on top of kube;
+	// - bundle it with the resource manager into a client object, since
+	//   the waiter does not need the factory.
 	waiter Waiter
 }
 
@@ -338,62 +335,6 @@ func (c *Client) Wait(resources ResourceList, timeout time.Duration) error {
 	return c.waiter.Wait(resources, timeout)
 }
 
-// WaitForReady waits for all of the objects to reach a ready state.
-func WaitForReady(ctx context.Context, sw watcher.StatusWatcher, resourceList ResourceList) error {
-	cancelCtx, cancel := context.WithCancel(ctx)
-	defer cancel()
-	// TODO maybe a simpler way to transfer the objects
-	runtimeObjs := []runtime.Object{}
-	for _, resource := range resourceList {
-		runtimeObjs = append(runtimeObjs, resource.Object)
-	}
-	resources := []object.ObjMetadata{}
-	for _, runtimeObj := range runtimeObjs {
-		obj, err := object.RuntimeToObjMeta(runtimeObj)
-		if err != nil {
-			return err
-		}
-		resources = append(resources, obj)
-	}
-	eventCh := sw.Watch(cancelCtx, resources, watcher.Options{})
-	statusCollector := collector.NewResourceStatusCollector(resources)
-	done := statusCollector.ListenWithObserver(eventCh, collector.ObserverFunc(
-		func(statusCollector *collector.ResourceStatusCollector, _ event.Event) {
-			rss := []*event.ResourceStatus{}
-			for _, rs := range statusCollector.ResourceStatuses {
-				if rs == nil {
-					continue
-				}
-				rss = append(rss, rs)
-			}
-			desired := status.CurrentStatus
-			if aggregator.AggregateStatus(rss, desired) == desired {
-				cancel()
-				return
-			}
-		}),
-	)
-	<-done
-
-	if statusCollector.Error != nil {
-		return statusCollector.Error
-	}
-
-	// Only check parent context error, otherwise we would error when desired status is achieved.
-	if ctx.Err() != nil {
-		var err error
-		for _, id := range resources {
-			rs := statusCollector.ResourceStatuses[id]
-			if rs.Status == status.CurrentStatus {
-				continue
-			}
-			err = fmt.Errorf("%s: %s not ready, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status)
-		}
-		return fmt.Errorf("not all resources ready: %w: %w", ctx.Err(), err)
-	}
-	return nil
-}
-
 // WaitWithJobs wait up to the given timeout for the specified resources to be ready, including jobs.
 func (c *Client) WaitWithJobs(resources ResourceList, timeout time.Duration) error {
 	return c.waiter.WaitWithJobs(resources, timeout)
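Since the waiter is now injected through New (a nil waiter falls back to the default kstatus watcher-backed one), consumers can supply their own implementation along the lines discussed in the Client struct comment above. A minimal sketch of that injection point, assuming the Waiter interface carries just the two methods this diff dispatches to (the real interface may be larger); noOpWaiter is purely hypothetical:

    package main

    import (
        "time"

        "helm.sh/helm/v3/pkg/kube"
    )

    // noOpWaiter is a hypothetical Waiter that treats every resource as ready
    // immediately. It exists only to illustrate the injection point; the real
    // kube.Waiter interface may require more methods than the two shown here.
    type noOpWaiter struct{}

    func (noOpWaiter) Wait(resources kube.ResourceList, timeout time.Duration) error {
        return nil
    }

    func (noOpWaiter) WaitWithJobs(resources kube.ResourceList, timeout time.Duration) error {
        return nil
    }

    func main() {
        // A nil getter falls back to genericclioptions.NewConfigFlags(true);
        // a non-nil waiter skips construction of the default kstatus waiter.
        c := kube.New(nil, noOpWaiter{})
        _ = c
    }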
diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go
index b12897121..de61a3862 100644
--- a/pkg/kube/client_test.go
+++ b/pkg/kube/client_test.go
@@ -24,7 +24,6 @@ import (
 	"testing"
 	"time"
 
-	"github.com/stretchr/testify/require"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -519,14 +518,16 @@ func TestWait(t *testing.T) {
 		}),
 	}
 	cs, err := c.getKubeClient()
-	require.NoError(t, err)
+	if err != nil {
+		t.Fatal(err)
+	}
 	checker := NewReadyChecker(cs, c.Log, PausedAsReady(true))
 	w := &waiter{
 		c:       checker,
 		log:     c.Log,
 		timeout: time.Second * 30,
 	}
 	c.waiter = w
 	resources, err := c.Build(objBody(&podList), false)
 	if err != nil {
 		t.Fatal(err)
@@ -580,14 +581,16 @@ func TestWaitJob(t *testing.T) {
 		}),
 	}
 	cs, err := c.getKubeClient()
-	require.NoError(t, err)
+	if err != nil {
+		t.Fatal(err)
+	}
 	checker := NewReadyChecker(cs, c.Log, PausedAsReady(true), CheckJobs(true))
 	w := &waiter{
 		c:       checker,
 		log:     c.Log,
 		timeout: time.Second * 30,
 	}
 	c.waiter = w
 	resources, err := c.Build(objBody(job), false)
 	if err != nil {
 		t.Fatal(err)
@@ -643,14 +646,16 @@ func TestWaitDelete(t *testing.T) {
 		}),
 	}
 	cs, err := c.getKubeClient()
-	require.NoError(t, err)
+	if err != nil {
+		t.Fatal(err)
+	}
 	checker := NewReadyChecker(cs, c.Log, PausedAsReady(true))
 	w := &waiter{
 		c:       checker,
 		log:     c.Log,
 		timeout: time.Second * 30,
 	}
 	c.waiter = w
 	resources, err := c.Build(objBody(&pod), false)
 	if err != nil {
 		t.Fatal(err)
diff --git a/pkg/kube/kready.go b/pkg/kube/kwait.go
similarity index 95%
rename from pkg/kube/kready.go
rename to pkg/kube/kwait.go
index c199eecc6..d74c913ea 100644
--- a/pkg/kube/kready.go
+++ b/pkg/kube/kwait.go
@@ -37,7 +37,8 @@ type kstatusWaiter struct {
 }
 
 func (w *kstatusWaiter) Wait(resourceList ResourceList, timeout time.Duration) error {
-	ctx := context.TODO()
+	ctx, cancelTimeout := context.WithTimeout(context.TODO(), timeout)
+	defer cancelTimeout()
 	cancelCtx, cancel := context.WithCancel(ctx)
 	defer cancel()
 	// TODO maybe a simpler way to transfer the objects
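The Wait change above layers a deadline context over the cancel-on-success context: the outer cancelTimeout enforces the caller's timeout, while the inner cancel lets the status observer stop the watch as soon as the aggregate status is Current. A self-contained sketch of the same layering, independent of Helm's types — waitUntil and cond are illustrative names, not part of the patch:

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    // waitUntil mirrors the context layering in kstatusWaiter.Wait: an outer
    // timeout context enforces the deadline, and an inner cancel context lets
    // the poll loop stop early once the desired condition is met.
    func waitUntil(timeout time.Duration, cond func() bool) error {
        ctx, cancelTimeout := context.WithTimeout(context.Background(), timeout)
        defer cancelTimeout()
        cancelCtx, cancel := context.WithCancel(ctx)
        defer cancel()

        ticker := time.NewTicker(10 * time.Millisecond)
        defer ticker.Stop()
        for {
            select {
            case <-cancelCtx.Done():
                // Only the parent's error signals failure; an inner cancel
                // means the desired state was reached before the deadline.
                if ctx.Err() != nil {
                    return ctx.Err() // context.DeadlineExceeded
                }
                return nil
            case <-ticker.C:
                if cond() {
                    cancel()
                }
            }
        }
    }

    func main() {
        start := time.Now()
        err := waitUntil(time.Second, func() bool { return time.Since(start) > 50*time.Millisecond })
        fmt.Println(err) // <nil>: condition met well before the one-second deadline
    }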
diff --git a/pkg/kube/kwait_test.go b/pkg/kube/kwait_test.go
new file mode 100644
index 000000000..1d9a69959
--- /dev/null
+++ b/pkg/kube/kwait_test.go
@@ -0,0 +1,119 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kube // import "helm.sh/helm/v3/pkg/kube"
+
+import (
+	"errors"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/util/yaml"
+	dynamicfake "k8s.io/client-go/dynamic/fake"
+	"k8s.io/kubectl/pkg/scheme"
+	"sigs.k8s.io/cli-utils/pkg/kstatus/watcher"
+	"sigs.k8s.io/cli-utils/pkg/testutil"
+)
+
+var podCurrentYaml = `
+apiVersion: v1
+kind: Pod
+metadata:
+  name: good-pod
+  namespace: ns
+status:
+  conditions:
+  - type: Ready
+    status: "True"
+  phase: Running
+`
+
+var podYaml = `
+apiVersion: v1
+kind: Pod
+metadata:
+  name: in-progress-pod
+  namespace: ns
+`
+
+func TestRunHealthChecks(t *testing.T) {
+	t.Parallel()
+	tests := []struct {
+		name       string
+		podYamls   []string
+		expectErrs []error
+	}{
+		{
+			name:       "Pod is ready",
+			podYamls:   []string{podCurrentYaml},
+			expectErrs: nil,
+		},
+		{
+			name:     "one of the pods never becomes ready",
+			podYamls: []string{podYaml, podCurrentYaml},
+			// TODO: aggregate one error per unready resource (see the errors.Join sketch after this diff).
+			expectErrs: []error{errors.New("not all resources ready: context deadline exceeded: in-progress-pod: Pod not ready, status: InProgress")},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			t.Parallel()
+			c := newTestClient(t)
+			fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+			fakeMapper := testutil.NewFakeRESTMapper(
+				v1.SchemeGroupVersion.WithKind("Pod"),
+			)
+			pods := []runtime.Object{}
+			statusWatcher := watcher.NewDefaultStatusWatcher(fakeClient, fakeMapper)
+			for _, podYaml := range tt.podYamls {
+				m := make(map[string]interface{})
+				err := yaml.Unmarshal([]byte(podYaml), &m)
+				require.NoError(t, err)
+				pod := &unstructured.Unstructured{Object: m}
+				pods = append(pods, pod)
+				podGVR := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}
+				err = fakeClient.Tracker().Create(podGVR, pod, pod.GetNamespace())
+				require.NoError(t, err)
+			}
+			c.waiter = &kstatusWaiter{
+				sw:  statusWatcher,
+				log: c.Log,
+			}
+
+			resourceList := ResourceList{}
+			for _, pod := range pods {
+				list, err := c.Build(objBody(pod), false)
+				if err != nil {
+					t.Fatal(err)
+				}
+				resourceList = append(resourceList, list...)
+			}
+
+			err := c.Wait(resourceList, time.Second*5)
+			if tt.expectErrs != nil {
+				require.EqualError(t, err, errors.Join(tt.expectErrs...).Error())
+				return
+			}
+			require.NoError(t, err)
+		})
+	}
+}
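On the TODO in TestRunHealthChecks: the removed WaitForReady assembled its failure message by reassigning a single err variable inside the loop over resource statuses, so only the last unready resource survived into the final error, and the kwait.go version appears to keep the same shape. A hedged sketch of one way to surface every unready resource using errors.Join from the standard library — the names and statuses are stand-ins for the kstatus identifiers, not values from this patch:

    package main

    import (
        "errors"
        "fmt"
    )

    func main() {
        // Stand-ins for the collected kstatus resource statuses.
        statuses := []struct{ name, kind, status string }{
            {"in-progress-pod", "Pod", "InProgress"},
            {"good-pod", "Pod", "Current"},
        }

        // Collect one error per unready resource instead of overwriting a
        // single err variable on each loop iteration.
        var errs []error
        for _, rs := range statuses {
            if rs.status == "Current" {
                continue
            }
            errs = append(errs, fmt.Errorf("%s: %s not ready, status: %s", rs.name, rs.kind, rs.status))
        }
        if err := errors.Join(errs...); err != nil {
            fmt.Println("not all resources ready:", err)
        }
    }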